source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
dgbsv.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgbsv.c, normal z -> d, Fri Sep 28 17:38:04 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gbsv
*
* Computes the solution to a system of linear equations A * X = B,
* using the LU factorization computed by plasma_dgbtrf.
*
*******************************************************************************
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in] kl
* The number of subdiagonals within the band of A. kl >= 0.
*
* @param[in] ku
* The number of superdiagonals within the band of A. ku >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of
* columns of the matrix B. nrhs >= 0.
*
* @param[in,out] AB
* Details of the LU factorization of the band matrix A, as
* computed by plasma_dgbtrf.
*
* @param[in] ldab
* The leading dimension of the array AB.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
******************************************************************************/
int plasma_dgbsv(int n, int kl, int ku, int nrhs,
                 double *pAB, int ldab, int *ipiv,
                 double *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments (error codes are the negated 1-based
    // position of the offending argument, LAPACK-style).
    if (n < 0) {
        plasma_error("illegal value of n");
        return -1;
    }
    if (kl < 0) {
        plasma_error("illegal value of kl");
        return -2;
    }
    if (ku < 0) {
        plasma_error("illegal value of ku");
        return -3;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -4;
    }
    if (ldab < imax(1, 1+kl+ku)) {
        plasma_error("illegal value of ldab");
        return -6;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -8;
    }

    // quick return
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gbtrf(plasma, PlasmaRealDouble, n, kl+ku+1);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);

    // Create tile matrices.
    plasma_desc_t AB;
    plasma_desc_t B;
    int tku = (ku+kl+nb-1)/nb; // number of tiles in upper band (not including diagonal)
    int tkl = (kl+nb-1)/nb;    // number of tiles in lower band (not including diagonal)
    int lm  = (tku+tkl+1)*nb;  // since we use dgetrf on panel, we pivot back within panel.
                               // this could fill the last tile of the panel,
                               // and we need extra NB space on the bottom
    int retval;
    retval = plasma_desc_general_band_create(PlasmaRealDouble, PlasmaGeneral,
                                             nb, nb, lm, n, 0, 0, n, n, kl, ku,
                                             &AB);
    if (retval != PlasmaSuccess) {
        // Bug fix: report the function that actually failed.
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AB);
        return retval;
    }

    // Initialize sequence; check the result so descriptors do not leak.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&AB);
        return retval;
    }

    // Initialize request; same cleanup discipline on failure.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&AB);
        return retval;
    }

    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dpb2desc(pAB, ldab, AB, &sequence, &request);
        plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);
    }

    #pragma omp parallel
    #pragma omp master
    {
        // Call the tile async function.
        plasma_omp_dgbsv(AB, ipiv, B, &sequence, &request);
    }

    #pragma omp parallel
    #pragma omp master
    {
        // Translate back to LAPACK layout.
        plasma_omp_ddesc2pb(AB, pAB, ldab, &sequence, &request);
        plasma_omp_ddesc2ge(B, pB, ldb, &sequence, &request);
    }

    // Free matrices in tile layout.
    plasma_desc_destroy(&B);
    plasma_desc_destroy(&AB);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* Computes the solution to a system of linear equations A * X = B,
* using the LU factorization computed by plasma_dgbtrf.
* Non-blocking tile version of plasma_dgbsv().
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in,out] AB
* Descriptor of matrix A.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in,out] B
* Descriptor of right-hand-sides B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
******************************************************************************/
void plasma_omp_dgbsv(plasma_desc_t AB, int *ipiv, plasma_desc_t B,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Bug fix: check sequence/request for NULL *before* any call to
    // plasma_request_fail(), which dereferences them. With the original
    // ordering a NULL sequence would be dereferenced while reporting an
    // invalid descriptor.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        // Cannot record a failure status without a sequence.
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        // Cannot record a failure status without a request.
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(AB) != PlasmaSuccess) {
        plasma_error("invalid AB");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (AB.n == 0 || B.n == 0)
        return;

    // Call the parallel functions: banded LU factorization, then the two
    // triangular band solves (L with row pivoting, then U).
    plasma_pdgbtrf(AB, ipiv, sequence, request);

    plasma_pdtbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans,
                  PlasmaUnit,
                  1.0, AB,
                       B,
                  ipiv,
                  sequence, request);

    plasma_pdtbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans,
                  PlasmaNonUnit,
                  1.0, AB,
                       B,
                  ipiv,
                  sequence, request);
}
|
GB_unop__asinh_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__asinh_fc32_fc32)
// op(A') function: GB (_unop_tran__asinh_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = casinhf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = casinhf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = casinhf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASINH || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__asinh_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = casinhf (Ax [k]) for every live entry k.
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only entries whose bitmap bit is set are touched.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                Cx [k] = casinhf (Ax [k]) ;
            }
        }
    }
    else
    {
        // dense/sparse case: every position holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = casinhf (Ax [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): bucket transpose of A while applying casinhf to each
// entry. The whole algorithm lives in the shared template
// GB_unop_transpose.c, specialized here via the GB_* macros defined above.
GrB_Info GB (_unop_tran__asinh_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // one workspace per thread/bucket
    const int64_t *restrict A_slice,    // how A's entries are split over threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The template reads the GB_ATYPE/GB_CTYPE/GB_CAST_OP macros above.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__identity_int16_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_int32)
// op(A') function: GB (_unop_tran__identity_int16_int32)
// C type: int16_t
// A type: int32_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int16_int32)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = (int16_t) Ax [k] for every live entry k.
    int64_t k ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only entries whose bitmap bit is set are touched.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                Cx [k] = (int16_t) Ax [k] ;
            }
        }
    }
    else
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a single bulk copy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ;
        #else
        // typecast is required, so copy entry by entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = (int16_t) Ax [k] ;
        }
        #endif
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): bucket transpose of A while casting each int32 entry
// to int16. The whole algorithm lives in the shared template
// GB_unop_transpose.c, specialized here via the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_int16_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // one workspace per thread/bucket
    const int64_t *restrict A_slice,    // how A's entries are split over threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The template reads the GB_ATYPE/GB_CTYPE/GB_CAST_OP macros above.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
c-parser.c | /* Parser for C and Objective-C.
Copyright (C) 1987-2018 Free Software Foundation, Inc.
Parser actions based on the old Bison parser; structure somewhat
influenced by and fragments based on the C++ parser.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* TODO:
Make sure all relevant comments, and all relevant code from all
actions, brought over from old parser. Verify exact correspondence
of syntax accepted.
Add testcases covering every input symbol in every state in old and
new parsers.
Include full syntax for GNU C, including erroneous cases accepted
with error messages, in syntax productions in comments.
Make more diagnostics in the front end generally take an explicit
location rather than implicitly using input_location. */
#include "config.h"
#define INCLUDE_UNIQUE_PTR
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "c-tree.h"
#include "timevar.h"
#include "stringpool.h"
#include "cgraph.h"
#include "attribs.h"
#include "stor-layout.h"
#include "varasm.h"
#include "trans-mem.h"
#include "c-family/c-pragma.h"
#include "c-lang.h"
#include "c-family/c-objc.h"
#include "plugin.h"
#include "omp-general.h"
#include "omp-offload.h"
#include "builtins.h"
#include "gomp-constants.h"
#include "c-family/c-indentation.h"
#include "gimple-expr.h"
#include "context.h"
#include "gcc-rich-location.h"
#include "c-parser.h"
#include "gimple-parser.h"
#include "read-rtl-function.h"
#include "run-rtl-passes.h"
#include "intl.h"
#include "c-family/name-hint.h"
#include "tree-iterator.h"
/* We need to walk over decls with incomplete struct/union/enum types
after parsing the whole translation unit.
In finish_decl(), if the decl is static, has incomplete
struct/union/enum type, it is appeneded to incomplete_record_decls.
In c_parser_translation_unit(), we iterate over incomplete_record_decls
and report error if any of the decls are still incomplete. */
vec<tree> incomplete_record_decls;
void
set_c_expr_source_range (c_expr *expr,
			 location_t start, location_t finish)
{
  /* Record the [START, FINISH] range on EXPR itself, and mirror it onto
     the underlying tree when one is present.  */
  expr->src_range.m_finish = finish;
  expr->src_range.m_start = start;
  if (expr->value != NULL)
    set_source_range (expr->value, start, finish);
}
void
set_c_expr_source_range (c_expr *expr,
			 source_range src_range)
{
  /* Overload taking a packaged source_range; also mirrored onto the
     underlying tree when one is present.  */
  expr->src_range = src_range;
  if (expr->value != NULL)
    set_source_range (expr->value, src_range);
}
/* Initialization routine for this file. */
void
c_parse_init (void)
{
  /* The only initialization required is of the reserved word
     identifiers.  */
  unsigned int i;
  tree id;
  int mask = 0;

  /* Make sure RID_MAX hasn't grown past the 8 bits used to hold the keyword in
     the c_token structure.  */
  gcc_assert (RID_MAX <= 255);

  /* Build the mask of reserved-word classes that are NOT keywords for the
     dialect being compiled; any resword whose disable bits intersect MASK
     is kept out of the keyword table below.  */
  mask |= D_CXXONLY;
  if (!flag_isoc99)
    mask |= D_C99;
  if (flag_no_asm)
    {
      mask |= D_ASM | D_EXT;
      if (!flag_isoc99)
	mask |= D_EXT89;
    }
  if (!c_dialect_objc ())
    mask |= D_OBJC | D_CXX_OBJC;

  ridpointers = ggc_cleared_vec_alloc<tree> ((int) RID_MAX);
  for (i = 0; i < num_c_common_reswords; i++)
    {
      /* If a keyword is disabled, do not enter it into the table
	 and so create a canonical spelling that isn't a keyword.  */
      if (c_common_reswords[i].disable & mask)
	{
	  if (warn_cxx_compat
	      && (c_common_reswords[i].disable & D_CXXWARN))
	    {
	      /* Not a keyword here, but flagged so -Wc++-compat can warn
		 when the identifier collides with a C++ keyword.  */
	      id = get_identifier (c_common_reswords[i].word);
	      C_SET_RID_CODE (id, RID_CXX_COMPAT_WARN);
	      C_IS_RESERVED_WORD (id) = 1;
	    }
	  continue;
	}

      id = get_identifier (c_common_reswords[i].word);
      C_SET_RID_CODE (id, c_common_reswords[i].rid);
      C_IS_RESERVED_WORD (id) = 1;
      ridpointers [(int) c_common_reswords[i].rid] = id;
    }

  /* Register the __intN keywords (e.g. __int128).  */
  for (i = 0; i < NUM_INT_N_ENTS; i++)
    {
      /* We always create the symbols but they aren't always supported.  */
      char name[50];
      sprintf (name, "__int%d", int_n_data[i].bitsize);
      id = get_identifier (name);
      C_SET_RID_CODE (id, RID_FIRST_INT_N + i);
      C_IS_RESERVED_WORD (id) = 1;
    }
}
/* A parser structure recording information about the state and
context of parsing. Includes lexer information with up to two
tokens of look-ahead; more are not needed for C. */
struct GTY(()) c_parser {
  /* The look-ahead tokens.  Normally points at tokens_buf below; may point
     elsewhere when parsing from a pre-lexed token array.  */
  c_token * GTY((skip)) tokens;
  /* Buffer for look-ahead tokens.  */
  c_token tokens_buf[4];
  /* How many look-ahead tokens are available (0 - 4, or
     more if parsing from pre-lexed tokens).  */
  unsigned int tokens_avail;
  /* True if a syntax error is being recovered from; false otherwise.
     c_parser_error sets this flag.  It should clear this flag when
     enough tokens have been consumed to recover from the error.  */
  BOOL_BITFIELD error : 1;
  /* True if we're processing a pragma, and shouldn't automatically
     consume CPP_PRAGMA_EOL.  */
  BOOL_BITFIELD in_pragma : 1;
  /* True if we're parsing the outermost block of an if statement.  */
  BOOL_BITFIELD in_if_block : 1;
  /* True if we want to lex an untranslated string.  */
  BOOL_BITFIELD lex_untranslated_string : 1;

  /* Objective-C specific parser/lexer information.  */

  /* True if we are in a context where the Objective-C "PQ" keywords
     are considered keywords.  */
  BOOL_BITFIELD objc_pq_context : 1;
  /* True if we are parsing a (potential) Objective-C foreach
     statement.  This is set to true after we parsed 'for (' and while
     we wait for 'in' or ';' to decide if it's a standard C for loop or an
     Objective-C foreach loop.  */
  BOOL_BITFIELD objc_could_be_foreach_context : 1;
  /* The following flag is needed to contextualize Objective-C lexical
     analysis.  In some cases (e.g., 'int NSObject;'), it is
     undesirable to bind an identifier to an Objective-C class, even
     if a class with that name exists.  */
  BOOL_BITFIELD objc_need_raw_identifier : 1;
  /* Nonzero if we're processing a __transaction statement.  The value
     is 1 | TM_STMT_ATTR_*.  */
  unsigned int in_transaction : 4;
  /* True if we are in a context where the Objective-C "Property attribute"
     keywords are valid.  */
  BOOL_BITFIELD objc_property_attr_context : 1;

  /* Location of the last consumed token.  */
  location_t last_token_location;
};
/* Return a pointer to the Nth token in PARSERs tokens_buf. */
c_token *
c_parser_tokens_buf (c_parser *parser, unsigned n)
{
  /* Hand back the address of slot N of PARSER's look-ahead buffer.  */
  return parser->tokens_buf + n;
}
/* Return the error state of PARSER. */
bool
c_parser_error (c_parser *parser)
{
  /* Report whether PARSER is currently recovering from a syntax error.  */
  bool in_error = parser->error;
  return in_error;
}
/* Set the error state of PARSER to ERR. */
void
c_parser_set_error (c_parser *parser, bool err)
{
  /* Record ERR as PARSER's current error-recovery state.  */
  parser->error = err ? 1 : 0;
}
/* The actual parser and external interface. ??? Does this need to be
garbage-collected? */
static GTY (()) c_parser *the_parser;
/* Read in and lex a single token, storing it in *TOKEN. */
static void
c_lex_one_token (c_parser *parser, c_token *token)
{
  timevar_push (TV_LEX);

  /* Pull one raw token from the preprocessor, then classify it below.  */
  token->type = c_lex_with_flags (&token->value, &token->location,
				  &token->flags,
				  (parser->lex_untranslated_string
				   ? C_LEX_STRING_NO_TRANSLATE : 0));
  /* Defaults; refined per token type in the switch.  */
  token->id_kind = C_ID_NONE;
  token->keyword = RID_MAX;
  token->pragma_kind = PRAGMA_NONE;

  switch (token->type)
    {
    case CPP_NAME:
      {
	tree decl;

	/* Remember, and then consume, the one-shot "raw identifier"
	   request (Objective-C only).  */
	bool objc_force_identifier = parser->objc_need_raw_identifier;
	if (c_dialect_objc ())
	  parser->objc_need_raw_identifier = false;

	if (C_IS_RESERVED_WORD (token->value))
	  {
	    enum rid rid_code = C_RID_CODE (token->value);

	    if (rid_code == RID_CXX_COMPAT_WARN)
	      {
		/* Not a C keyword; only entered in the table so that
		   -Wc++-compat can diagnose the collision.  */
		warning_at (token->location,
			    OPT_Wc___compat,
			    "identifier %qE conflicts with C++ keyword",
			    token->value);
	      }
	    else if (rid_code >= RID_FIRST_ADDR_SPACE
		     && rid_code <= RID_LAST_ADDR_SPACE)
	      {
		/* Named address-space qualifier (target extension).  */
		addr_space_t as;
		as = (addr_space_t) (rid_code - RID_FIRST_ADDR_SPACE);
		targetm.addr_space.diagnose_usage (as, token->location);
		token->id_kind = C_ID_ADDRSPACE;
		token->keyword = rid_code;
		break;
	      }
	    else if (c_dialect_objc () && OBJC_IS_PQ_KEYWORD (rid_code))
	      {
		/* We found an Objective-C "pq" keyword (in, out,
		   inout, bycopy, byref, oneway).  They need special
		   care because the interpretation depends on the
		   context.  */
		if (parser->objc_pq_context)
		  {
		    token->type = CPP_KEYWORD;
		    token->keyword = rid_code;
		    break;
		  }
		else if (parser->objc_could_be_foreach_context
			 && rid_code == RID_IN)
		  {
		    /* We are in Objective-C, inside a (potential)
		       foreach context (which means after having
		       parsed 'for (', but before having parsed ';'),
		       and we found 'in'.  We consider it the keyword
		       which terminates the declaration at the
		       beginning of a foreach-statement.  Note that
		       this means you can't use 'in' for anything else
		       in that context; in particular, in Objective-C
		       you can't use 'in' as the name of the running
		       variable in a C for loop.  We could potentially
		       try to add code here to disambiguate, but it
		       seems a reasonable limitation.  */
		    token->type = CPP_KEYWORD;
		    token->keyword = rid_code;
		    break;
		  }
		/* Else, "pq" keywords outside of the "pq" context are
		   not keywords, and we fall through to the code for
		   normal tokens.  */
	      }
	    else if (c_dialect_objc () && OBJC_IS_PATTR_KEYWORD (rid_code))
	      {
		/* We found an Objective-C "property attribute"
		   keyword (getter, setter, readonly, etc).  These are
		   only valid in the property context.  */
		if (parser->objc_property_attr_context)
		  {
		    token->type = CPP_KEYWORD;
		    token->keyword = rid_code;
		    break;
		  }
		/* Else they are not special keywords.
		*/
	      }
	    else if (c_dialect_objc ()
		     && (OBJC_IS_AT_KEYWORD (rid_code)
			 || OBJC_IS_CXX_KEYWORD (rid_code)))
	      {
		/* We found one of the Objective-C "@" keywords (defs,
		   selector, synchronized, etc) or one of the
		   Objective-C "cxx" keywords (class, private,
		   protected, public, try, catch, throw) without a
		   preceding '@' sign.  Do nothing and fall through to
		   the code for normal tokens (in C++ we would still
		   consider the CXX ones keywords, but not in C).  */
		;
	      }
	    else
	      {
		/* An ordinary reserved word: a plain keyword token.  */
		token->type = CPP_KEYWORD;
		token->keyword = rid_code;
		break;
	      }
	  }

	/* Not a keyword: decide whether the identifier names a type,
	   an Objective-C class, or is an ordinary identifier.  */
	decl = lookup_name (token->value);
	if (decl)
	  {
	    if (TREE_CODE (decl) == TYPE_DECL)
	      {
		token->id_kind = C_ID_TYPENAME;
		break;
	      }
	  }
	else if (c_dialect_objc ())
	  {
	    tree objc_interface_decl = objc_is_class_name (token->value);
	    /* Objective-C class names are in the same namespace as
	       variables and typedefs, and hence are shadowed by local
	       declarations.  */
	    if (objc_interface_decl
		&& (!objc_force_identifier || global_bindings_p ()))
	      {
		token->value = objc_interface_decl;
		token->id_kind = C_ID_CLASSNAME;
		break;
	      }
	  }
	token->id_kind = C_ID_ID;
      }
      break;
    case CPP_AT_NAME:
      /* This only happens in Objective-C; it must be a keyword.  */
      token->type = CPP_KEYWORD;
      switch (C_RID_CODE (token->value))
	{
	  /* Replace 'class' with '@class', 'private' with '@private',
	     etc.  This prevents confusion with the C++ keyword
	     'class', and makes the tokens consistent with other
	     Objective-C 'AT' keywords.  For example '@class' is
	     reported as RID_AT_CLASS which is consistent with
	     '@synchronized', which is reported as
	     RID_AT_SYNCHRONIZED.
	  */
	case RID_CLASS:     token->keyword = RID_AT_CLASS; break;
	case RID_PRIVATE:   token->keyword = RID_AT_PRIVATE; break;
	case RID_PROTECTED: token->keyword = RID_AT_PROTECTED; break;
	case RID_PUBLIC:    token->keyword = RID_AT_PUBLIC; break;
	case RID_THROW:     token->keyword = RID_AT_THROW; break;
	case RID_TRY:       token->keyword = RID_AT_TRY; break;
	case RID_CATCH:     token->keyword = RID_AT_CATCH; break;
	case RID_SYNCHRONIZED: token->keyword = RID_AT_SYNCHRONIZED; break;
	default:            token->keyword = C_RID_CODE (token->value);
	}
      break;
    case CPP_COLON:
    case CPP_COMMA:
    case CPP_CLOSE_PAREN:
    case CPP_SEMICOLON:
      /* These tokens may affect the interpretation of any identifiers
	 following, if doing Objective-C.  */
      if (c_dialect_objc ())
	parser->objc_need_raw_identifier = false;
      break;
    case CPP_PRAGMA:
      /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST.  */
      token->pragma_kind = (enum pragma_kind) TREE_INT_CST_LOW (token->value);
      token->value = NULL;
      break;
    default:
      break;
    }
  timevar_pop (TV_LEX);
}
/* Return a pointer to the next token from PARSER, reading it in if
necessary. */
c_token *
c_parser_peek_token (c_parser *parser)
{
  /* Lazily fill slot 0 of the look-ahead buffer on first use.  */
  c_token *first = &parser->tokens[0];
  if (parser->tokens_avail == 0)
    {
      c_lex_one_token (parser, first);
      parser->tokens_avail = 1;
    }
  return first;
}
/* Return a pointer to the next-but-one token from PARSER, reading it
in if necessary. The next token is already read in. */
c_token *
c_parser_peek_2nd_token (c_parser *parser)
{
  /* Lazily fill slot 1; slot 0 must already be populated, and must not
     be EOF or a pragma terminator (lexing past either is invalid).  */
  if (parser->tokens_avail < 2)
    {
      gcc_assert (parser->tokens_avail == 1);
      gcc_assert (parser->tokens[0].type != CPP_EOF);
      gcc_assert (parser->tokens[0].type != CPP_PRAGMA_EOL);
      c_lex_one_token (parser, &parser->tokens[1]);
      parser->tokens_avail = 2;
    }
  return &parser->tokens[1];
}
/* Return a pointer to the Nth token from PARSER, reading it
in if necessary. The N-1th token is already read in. */
c_token *
c_parser_peek_nth_token (c_parser *parser, unsigned int n)
{
  /* N is 1-based, not zero-based.  */
  gcc_assert (n > 0);
  /* Lazily fill slot N-1; tokens 1..N-1 must already be present.  */
  if (parser->tokens_avail < n)
    {
      gcc_assert (parser->tokens_avail == n - 1);
      c_lex_one_token (parser, &parser->tokens[n - 1]);
      parser->tokens_avail = n;
    }
  return &parser->tokens[n - 1];
}
/* Return true if KEYWORD can begin a type name: a type specifier, type
   qualifier, or attribute keyword.  Used when deciding whether a token
   sequence is a type name (e.g. in casts and sizeof).  */
bool
c_keyword_starts_typename (enum rid keyword)
{
  switch (keyword)
    {
    case RID_UNSIGNED:
    case RID_LONG:
    case RID_SHORT:
    case RID_SIGNED:
    case RID_COMPLEX:
    case RID_INT:
    case RID_CHAR:
    case RID_FLOAT:
    case RID_DOUBLE:
    case RID_VOID:
    case RID_DFLOAT32:
    case RID_DFLOAT64:
    case RID_DFLOAT128:
    CASE_RID_FLOATN_NX:
    case RID_BOOL:
    case RID_ENUM:
    case RID_STRUCT:
    case RID_UNION:
    case RID_TYPEOF:
    case RID_CONST:
    case RID_ATOMIC:
    case RID_VOLATILE:
    case RID_RESTRICT:
    case RID_ATTRIBUTE:
    case RID_FRACT:
    case RID_ACCUM:
    case RID_SAT:
    case RID_AUTO_TYPE:
    case RID_ALIGNAS:
      return true;
    default:
      /* __intN keywords count only when the target enables that width.  */
      if (keyword >= RID_FIRST_INT_N
	  && keyword < RID_FIRST_INT_N + NUM_INT_N_ENTS
	  && int_n_enabled_p[keyword - RID_FIRST_INT_N])
	return true;
      return false;
    }
}
/* Return true if TOKEN can start a type name,
false otherwise. */
bool
c_token_starts_typename (c_token *token)
{
  switch (token->type)
    {
    case CPP_NAME:
      /* An identifier starts a type name only if the lexer classified
	 it as a typedef name, address space, or ObjC class name.  */
      switch (token->id_kind)
	{
	case C_ID_ID:
	  return false;
	case C_ID_ADDRSPACE:
	  return true;
	case C_ID_TYPENAME:
	  return true;
	case C_ID_CLASSNAME:
	  gcc_assert (c_dialect_objc ());
	  return true;
	default:
	  gcc_unreachable ();
	}
    case CPP_KEYWORD:
      return c_keyword_starts_typename (token->keyword);
    case CPP_LESS:
      /* '<' opens a protocol qualifier list in Objective-C only.  */
      if (c_dialect_objc ())
	return true;
      return false;
    default:
      return false;
    }
}
/* Return true if the next token from PARSER can start a type name,
false otherwise. LA specifies how to do lookahead in order to
detect unknown type names. If unsure, pick CLA_PREFER_ID. */
static inline bool
c_parser_next_tokens_start_typename (c_parser *parser, enum c_lookahead_kind la)
{
  c_token *token = c_parser_peek_token (parser);
  if (c_token_starts_typename (token))
    return true;

  /* Try a bit harder to detect an unknown typename: an undeclared
     identifier followed by another identifier or '*' (or any undeclared
     identifier, when the caller prefers types) is treated as a typo'd or
     missing type name.  */
  if (la != cla_prefer_id
      && token->type == CPP_NAME
      && token->id_kind == C_ID_ID

      /* Do not try too hard when we could have "object in array".  */
      && !parser->objc_could_be_foreach_context

      && (la == cla_prefer_type
	  || c_parser_peek_2nd_token (parser)->type == CPP_NAME
	  || c_parser_peek_2nd_token (parser)->type == CPP_MULT)

      /* Only unknown identifiers.  */
      && !lookup_name (token->value))
    return true;

  return false;
}
/* Return true if TOKEN is a type qualifier, false otherwise. */
static bool
c_token_is_qualifier (c_token *token)
{
  switch (token->type)
    {
    case CPP_NAME:
      /* Among identifiers, only named address spaces qualify.  */
      switch (token->id_kind)
	{
	case C_ID_ADDRSPACE:
	  return true;
	default:
	  return false;
	}
    case CPP_KEYWORD:
      switch (token->keyword)
	{
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	case RID_ATTRIBUTE:
	case RID_ATOMIC:
	  return true;
	default:
	  return false;
	}
    case CPP_LESS:
      return false;
    default:
      /* Callers only ask about tokens that can begin declaration
	 specifiers; anything else indicates a caller bug.  */
      gcc_unreachable ();
    }
}
/* Return true if the next token from PARSER is a type qualifier,
false otherwise. */
static inline bool
c_parser_next_token_is_qualifier (c_parser *parser)
{
  /* Peek the next token and classify it without consuming it.  */
  return c_token_is_qualifier (c_parser_peek_token (parser));
}
/* Return true if TOKEN can start declaration specifiers, false
otherwise. */
static bool
c_token_starts_declspecs (c_token *token)
{
  switch (token->type)
    {
    case CPP_NAME:
      /* Identifiers start declspecs only when classified as a typedef
	 name, address space, or ObjC class name by the lexer.  */
      switch (token->id_kind)
	{
	case C_ID_ID:
	  return false;
	case C_ID_ADDRSPACE:
	  return true;
	case C_ID_TYPENAME:
	  return true;
	case C_ID_CLASSNAME:
	  gcc_assert (c_dialect_objc ());
	  return true;
	default:
	  gcc_unreachable ();
	}
    case CPP_KEYWORD:
      /* Storage-class specifiers, function specifiers, type specifiers,
	 type qualifiers, and attributes all start declspecs.  */
      switch (token->keyword)
	{
	case RID_STATIC:
	case RID_EXTERN:
	case RID_REGISTER:
	case RID_TYPEDEF:
	case RID_INLINE:
	case RID_NORETURN:
	case RID_AUTO:
	case RID_THREAD:
	case RID_UNSIGNED:
	case RID_LONG:
	case RID_SHORT:
	case RID_SIGNED:
	case RID_COMPLEX:
	case RID_INT:
	case RID_CHAR:
	case RID_FLOAT:
	case RID_DOUBLE:
	case RID_VOID:
	case RID_DFLOAT32:
	case RID_DFLOAT64:
	case RID_DFLOAT128:
	CASE_RID_FLOATN_NX:
	case RID_BOOL:
	case RID_ENUM:
	case RID_STRUCT:
	case RID_UNION:
	case RID_TYPEOF:
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	case RID_ATTRIBUTE:
	case RID_FRACT:
	case RID_ACCUM:
	case RID_SAT:
	case RID_ALIGNAS:
	case RID_ATOMIC:
	case RID_AUTO_TYPE:
	  return true;
	default:
	  /* __intN keywords count only when the target enables them.  */
	  if (token->keyword >= RID_FIRST_INT_N
	      && token->keyword < RID_FIRST_INT_N + NUM_INT_N_ENTS
	      && int_n_enabled_p[token->keyword - RID_FIRST_INT_N])
	    return true;
	  return false;
	}
    case CPP_LESS:
      /* '<' opens a protocol qualifier list in Objective-C only.  */
      if (c_dialect_objc ())
	return true;
      return false;
    default:
      return false;
    }
}
/* Return true if TOKEN can start declaration specifiers or a static
   assertion, false otherwise.  */
static bool
c_token_starts_declaration (c_token *token)
{
  return (c_token_starts_declspecs (token)
	  || token->keyword == RID_STATIC_ASSERT);
}
/* Return true if the next token from PARSER can start declaration
   specifiers, false otherwise.  */
bool
c_parser_next_token_starts_declspecs (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);
  /* In Objective-C, a classname normally starts a declspecs unless it
     is immediately followed by a dot.  In that case, it is the
     Objective-C 2.0 "dot-syntax" for class objects, ie, calls the
     setter/getter on the class.  c_token_starts_declspecs() can't
     differentiate between the two cases because it only checks the
     current token, so we have a special check here.  */
  bool objc_class_dot_syntax
    = (c_dialect_objc ()
       && token->type == CPP_NAME
       && token->id_kind == C_ID_CLASSNAME
       && c_parser_peek_2nd_token (parser)->type == CPP_DOT);
  if (objc_class_dot_syntax)
    return false;
  return c_token_starts_declspecs (token);
}
/* Return true if the next tokens from PARSER can start declaration
   specifiers or a static assertion, false otherwise.  */
bool
c_parser_next_tokens_start_declaration (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);
  /* Same as above: an Objective-C classname followed by '.' is the
     dot-syntax for class objects, not a declaration.  */
  if (c_dialect_objc ()
      && token->type == CPP_NAME
      && token->id_kind == C_ID_CLASSNAME
      && c_parser_peek_2nd_token (parser)->type == CPP_DOT)
    return false;
  /* Labels do not start declarations.  */
  if (token->type == CPP_NAME
      && c_parser_peek_2nd_token (parser)->type == CPP_COLON)
    return false;
  if (c_token_starts_declaration (token))
    return true;
  /* Fall back to the multi-token typename check (e.g. for names that
     only form a type together with following tokens).  */
  if (c_parser_next_tokens_start_typename (parser, cla_nonabstract_decl))
    return true;
  return false;
}
/* Consume the next token from PARSER.  Records the consumed token's
   location in parser->last_token_location.  */
void
c_parser_consume_token (c_parser *parser)
{
  gcc_assert (parser->tokens_avail >= 1);
  gcc_assert (parser->tokens[0].type != CPP_EOF);
  gcc_assert (!parser->in_pragma || parser->tokens[0].type != CPP_PRAGMA_EOL);
  gcc_assert (parser->error || parser->tokens[0].type != CPP_PRAGMA);
  parser->last_token_location = parser->tokens[0].location;
  /* If the parser is reading from an external token array (not its
     own two-slot buffer), just advance the pointer; otherwise shift
     the lookahead token, if any, down into slot 0.  */
  if (parser->tokens != &parser->tokens_buf[0])
    parser->tokens++;
  else if (parser->tokens_avail == 2)
    parser->tokens[0] = parser->tokens[1];
  parser->tokens_avail--;
}
/* Expect the current token to be a #pragma.  Consume it and remember
   that we've begun parsing a pragma.  */
static void
c_parser_consume_pragma (c_parser *parser)
{
  gcc_assert (!parser->in_pragma);
  gcc_assert (parser->tokens_avail >= 1);
  gcc_assert (parser->tokens[0].type == CPP_PRAGMA);
  /* Same token-advance logic as c_parser_consume_token, minus the
     assertions that would reject a CPP_PRAGMA token.  */
  if (parser->tokens != &parser->tokens_buf[0])
    parser->tokens++;
  else if (parser->tokens_avail == 2)
    parser->tokens[0] = parser->tokens[1];
  parser->tokens_avail--;
  parser->in_pragma = true;
}
/* Update the global input_location from TOKEN.  Does nothing for an
   EOF token.  */
static inline void
c_parser_set_source_position_from_token (c_token *token)
{
  if (token->type == CPP_EOF)
    return;
  input_location = token->location;
}
/* Helper function for c_parser_error.
   Having peeked a token of kind TOK1_KIND that might signify
   a conflict marker, peek successor tokens to determine
   if we actually do have a conflict marker.
   Specifically, we consider a run of 7 '<', '=' or '>' characters
   at the start of a line as a conflict marker.
   These come through the lexer as three pairs and a single,
   e.g. three CPP_LSHIFT ("<<") and a CPP_LESS ('<').
   If it returns true, *OUT_LOC is written to with the location/range
   of the marker.  */
static bool
c_parser_peek_conflict_marker (c_parser *parser, enum cpp_ttype tok1_kind,
			       location_t *out_loc)
{
  /* Tokens 2 and 3 must repeat the first token's kind.  */
  c_token *token2 = c_parser_peek_2nd_token (parser);
  if (token2->type != tok1_kind)
    return false;
  c_token *token3 = c_parser_peek_nth_token (parser, 3);
  if (token3->type != tok1_kind)
    return false;
  /* The 7th character arrives as a single-character token.  */
  c_token *token4 = c_parser_peek_nth_token (parser, 4);
  if (token4->type != conflict_marker_get_final_tok_kind (tok1_kind))
    return false;
  /* It must be at the start of the line.  */
  location_t start_loc = c_parser_peek_token (parser)->location;
  if (LOCATION_COLUMN (start_loc) != 1)
    return false;
  /* We have a conflict marker.  Construct a location of the form:
       <<<<<<<
       ^~~~~~~
     with start == caret, finishing at the end of the marker.  */
  location_t finish_loc = get_finish (token4->location);
  *out_loc = make_location (start_loc, start_loc, finish_loc);
  return true;
}
/* Issue a diagnostic of the form
      FILE:LINE: MESSAGE before TOKEN
   where TOKEN is the next token in the input stream of PARSER.
   MESSAGE (specified by the caller) is usually of the form "expected
   OTHER-TOKEN".
   Use RICHLOC as the location of the diagnostic.
   Do not issue a diagnostic if still recovering from an error.
   Return true iff an error was actually emitted.
   ??? This is taken from the C++ parser, but building up messages in
   this way is not i18n-friendly and some other approach should be
   used.  */
static bool
c_parser_error_richloc (c_parser *parser, const char *gmsgid,
			rich_location *richloc)
{
  c_token *token = c_parser_peek_token (parser);
  /* Suppress cascading diagnostics while recovering from an error.  */
  if (parser->error)
    return false;
  parser->error = true;
  /* A NULL message means the caller already issued a diagnostic.  */
  if (!gmsgid)
    return false;
  /* If this is actually a conflict marker, report it as such.  */
  if (token->type == CPP_LSHIFT
      || token->type == CPP_RSHIFT
      || token->type == CPP_EQ_EQ)
    {
      location_t loc;
      if (c_parser_peek_conflict_marker (parser, token->type, &loc))
	{
	  error_at (loc, "version control conflict marker in file");
	  return true;
	}
    }
  c_parse_error (gmsgid,
		 /* Because c_parse_error does not understand
		    CPP_KEYWORD, keywords are treated like
		    identifiers.  */
		 (token->type == CPP_KEYWORD ? CPP_NAME : token->type),
		 /* ??? The C parser does not save the cpp flags of a
		    token, we need to pass 0 here and we will not get
		    the source spelling of some tokens but rather the
		    canonical spelling.  */
		 token->value, /*flags=*/0, richloc);
  return true;
}
/* As c_parser_error_richloc, but issue the message at the
   location of PARSER's next token, or at input_location
   if the next token is EOF.  */
bool
c_parser_error (c_parser *parser, const char *gmsgid)
{
  /* Sync input_location with the next token (unless it is EOF), then
     diagnose at that position.  */
  c_parser_set_source_position_from_token (c_parser_peek_token (parser));
  rich_location richloc (line_table, input_location);
  return c_parser_error_richloc (parser, gmsgid, &richloc);
}
/* Some tokens naturally come in pairs e.g.'(' and ')'.
   This class is for tracking such a matching pair of symbols.
   In particular, it tracks the location of the first token,
   so that if the second token is missing, we can highlight the
   location of the first token when notifying the user about the
   problem.  The traits_t template parameter supplies the open/close
   token types and the corresponding error messages.  */
template <typename traits_t>
class token_pair
{
 public:
  /* token_pair's ctor.  */
  token_pair () : m_open_loc (UNKNOWN_LOCATION) {}

  /* If the next token is the opening symbol for this pair, consume it and
     return true.
     Otherwise, issue an error and return false.
     In either case, record the location of the opening token.  */
  bool require_open (c_parser *parser)
  {
    c_token *token = c_parser_peek_token (parser);
    if (token)
      m_open_loc = token->location;
    return c_parser_require (parser, traits_t::open_token_type,
			     traits_t::open_gmsgid);
  }

  /* Consume the next token from PARSER, recording its location as
     that of the opening token within the pair.  */
  void consume_open (c_parser *parser)
  {
    c_token *token = c_parser_peek_token (parser);
    gcc_assert (token->type == traits_t::open_token_type);
    m_open_loc = token->location;
    c_parser_consume_token (parser);
  }

  /* If the next token is the closing symbol for this pair, consume it
     and return true.
     Otherwise, issue an error, highlighting the location of the
     corresponding opening token, and return false.  */
  bool require_close (c_parser *parser) const
  {
    return c_parser_require (parser, traits_t::close_token_type,
			     traits_t::close_gmsgid, m_open_loc);
  }

  /* Like token_pair::require_close, except that tokens will be skipped
     until the desired token is found.  An error message is still produced
     if the next token is not as expected.  */
  void skip_until_found_close (c_parser *parser) const
  {
    c_parser_skip_until_found (parser, traits_t::close_token_type,
			       traits_t::close_gmsgid, m_open_loc);
  }

 private:
  /* Location of the opening token, for use in diagnostics about a
     missing closing token.  */
  location_t m_open_loc;
};
/* Traits for token_pair<T> for tracking matching pairs of parentheses.  */
struct matching_paren_traits
{
  static const enum cpp_ttype open_token_type = CPP_OPEN_PAREN;
  static const char * const open_gmsgid;
  static const enum cpp_ttype close_token_type = CPP_CLOSE_PAREN;
  static const char * const close_gmsgid;
};

/* Out-of-class definitions of the diagnostic message strings.  */
const char * const matching_paren_traits::open_gmsgid = "expected %<(%>";
const char * const matching_paren_traits::close_gmsgid = "expected %<)%>";

/* "matching_parens" is a token_pair<T> class for tracking matching
   pairs of parentheses.  */
typedef token_pair<matching_paren_traits> matching_parens;
/* Traits for token_pair<T> for tracking matching pairs of braces.  */
struct matching_brace_traits
{
  static const enum cpp_ttype open_token_type = CPP_OPEN_BRACE;
  static const char * const open_gmsgid;
  static const enum cpp_ttype close_token_type = CPP_CLOSE_BRACE;
  static const char * const close_gmsgid;
};

/* Out-of-class definitions of the diagnostic message strings.  */
const char * const matching_brace_traits::open_gmsgid = "expected %<{%>";
const char * const matching_brace_traits::close_gmsgid = "expected %<}%>";

/* "matching_braces" is a token_pair<T> class for tracking matching
   pairs of braces.  */
typedef token_pair<matching_brace_traits> matching_braces;
/* Get a description of the matching symbol to TYPE e.g. "(" for
   CPP_CLOSE_PAREN.  */
static const char *
get_matching_symbol (enum cpp_ttype type)
{
  switch (type)
    {
    case CPP_CLOSE_PAREN:
      return "(";
    case CPP_CLOSE_BRACE:
      return "{";
    default:
      gcc_unreachable ();
      return "";
    }
}
/* If the next token is of the indicated TYPE, consume it.  Otherwise,
   issue the error MSGID.  If MSGID is NULL then a message has already
   been produced and no message will be produced this time.  Returns
   true if found, false otherwise.
   If MATCHING_LOCATION is not UNKNOWN_LOCATION, then highlight it
   within any error as the location of an "opening" token matching
   the close token TYPE (e.g. the location of the '(' when TYPE is
   CPP_CLOSE_PAREN).
   If TYPE_IS_UNIQUE is true (the default) then msgid describes exactly
   one type (e.g. "expected %<)%>") and thus it may be reasonable to
   attempt to generate a fix-it hint for the problem.
   Otherwise msgid describes multiple token types (e.g.
   "expected %<;%>, %<,%> or %<)%>"), and thus we shouldn't attempt to
   generate a fix-it hint.  */
bool
c_parser_require (c_parser *parser,
		  enum cpp_ttype type,
		  const char *msgid,
		  location_t matching_location,
		  bool type_is_unique)
{
  if (c_parser_next_token_is (parser, type))
    {
      c_parser_consume_token (parser);
      return true;
    }
  else
    {
      location_t next_token_loc = c_parser_peek_token (parser)->location;
      gcc_rich_location richloc (next_token_loc);

      /* Potentially supply a fix-it hint, suggesting to add the
	 missing token immediately after the *previous* token.
	 This may move the primary location within richloc.  */
      if (!parser->error && type_is_unique)
	maybe_suggest_missing_token_insertion (&richloc, type,
					       parser->last_token_location);

      /* If matching_location != UNKNOWN_LOCATION, highlight it.
	 Attempt to consolidate diagnostics by printing it as a
	 secondary range within the main diagnostic.  */
      bool added_matching_location = false;
      if (matching_location != UNKNOWN_LOCATION)
	added_matching_location
	  = richloc.add_location_if_nearby (matching_location);

      /* Only emit the follow-up note if an error was actually issued
	 (c_parser_error_richloc returns false when recovering).  */
      if (c_parser_error_richloc (parser, msgid, &richloc))
	/* If we weren't able to consolidate matching_location, then
	   print it as a secondary diagnostic.  */
	if (matching_location != UNKNOWN_LOCATION && !added_matching_location)
	  inform (matching_location, "to match this %qs",
		  get_matching_symbol (type));

      return false;
    }
}
/* If the next token is the indicated keyword, consume it.  Otherwise,
   issue the error MSGID.  Returns true if found, false otherwise.  */
static bool
c_parser_require_keyword (c_parser *parser,
			  enum rid keyword,
			  const char *msgid)
{
  if (!c_parser_next_token_is_keyword (parser, keyword))
    {
      c_parser_error (parser, msgid);
      return false;
    }
  c_parser_consume_token (parser);
  return true;
}
/* Like c_parser_require, except that tokens will be skipped until the
   desired token is found.  An error message is still produced if the
   next token is not as expected.  If MSGID is NULL then a message has
   already been produced and no message will be produced this
   time.
   If MATCHING_LOCATION is not UNKNOWN_LOCATION, then highlight it
   within any error as the location of an "opening" token matching
   the close token TYPE (e.g. the location of the '(' when TYPE is
   CPP_CLOSE_PAREN).  */
void
c_parser_skip_until_found (c_parser *parser,
			   enum cpp_ttype type,
			   const char *msgid,
			   location_t matching_location)
{
  unsigned nesting_depth = 0;

  /* If the desired token is already next, c_parser_require consumes
     it and we are done.  */
  if (c_parser_require (parser, type, msgid, matching_location))
    return;

  /* Skip tokens until the desired token is found.  */
  while (true)
    {
      /* Peek at the next token.  */
      c_token *token = c_parser_peek_token (parser);
      /* If we've reached the token we want, consume it and stop.  */
      if (token->type == type && !nesting_depth)
	{
	  c_parser_consume_token (parser);
	  break;
	}
      /* If we've run out of tokens, stop.  */
      if (token->type == CPP_EOF)
	return;
      if (token->type == CPP_PRAGMA_EOL && parser->in_pragma)
	return;
      /* Track bracket nesting so we only match TYPE at the depth we
	 started at.  */
      if (token->type == CPP_OPEN_BRACE
	  || token->type == CPP_OPEN_PAREN
	  || token->type == CPP_OPEN_SQUARE)
	++nesting_depth;
      else if (token->type == CPP_CLOSE_BRACE
	       || token->type == CPP_CLOSE_PAREN
	       || token->type == CPP_CLOSE_SQUARE)
	{
	  /* A closing delimiter at depth 0 ends the skip without
	     being consumed.  */
	  if (nesting_depth-- == 0)
	    break;
	}
      /* Consume this token.  */
      c_parser_consume_token (parser);
    }
  parser->error = false;
}
/* Skip tokens until the end of a parameter is found, but do not
   consume the comma, semicolon or closing delimiter.  */
static void
c_parser_skip_to_end_of_parameter (c_parser *parser)
{
  unsigned nesting_depth = 0;
  while (true)
    {
      c_token *token = c_parser_peek_token (parser);
      /* A non-nested ',' or ';' marks the end of the parameter; leave
	 it for the caller.  */
      if ((token->type == CPP_COMMA || token->type == CPP_SEMICOLON)
	  && !nesting_depth)
	break;
      /* If we've run out of tokens, stop.  */
      if (token->type == CPP_EOF)
	return;
      if (token->type == CPP_PRAGMA_EOL && parser->in_pragma)
	return;
      /* Track bracket nesting so delimiters inside nested constructs
	 are ignored.  */
      if (token->type == CPP_OPEN_BRACE
	  || token->type == CPP_OPEN_PAREN
	  || token->type == CPP_OPEN_SQUARE)
	++nesting_depth;
      else if (token->type == CPP_CLOSE_BRACE
	       || token->type == CPP_CLOSE_PAREN
	       || token->type == CPP_CLOSE_SQUARE)
	{
	  /* A closing delimiter at depth 0 also ends the parameter,
	     unconsumed.  */
	  if (nesting_depth-- == 0)
	    break;
	}
      /* Consume this token.  */
      c_parser_consume_token (parser);
    }
  parser->error = false;
}
/* Expect to be at the end of the pragma directive and consume an
   end of line marker.  If ERROR_IF_NOT_EOL, diagnose any tokens
   remaining before the CPP_PRAGMA_EOL; they are skipped either way.  */
static void
c_parser_skip_to_pragma_eol (c_parser *parser, bool error_if_not_eol = true)
{
  gcc_assert (parser->in_pragma);
  parser->in_pragma = false;
  if (error_if_not_eol && c_parser_peek_token (parser)->type != CPP_PRAGMA_EOL)
    c_parser_error (parser, "expected end of line");
  /* Consume tokens up to and including CPP_PRAGMA_EOL (or stop,
     without consuming, at EOF).  */
  cpp_ttype token_type;
  do
    {
      c_token *token = c_parser_peek_token (parser);
      token_type = token->type;
      if (token_type == CPP_EOF)
	break;
      c_parser_consume_token (parser);
    }
  while (token_type != CPP_PRAGMA_EOL);
  parser->error = false;
}
/* Skip tokens until we have consumed an entire block, or until we
   have consumed a non-nested ';'.  */
static void
c_parser_skip_to_end_of_block_or_statement (c_parser *parser)
{
  unsigned nesting_depth = 0;
  /* Remember the error state so it can be reinstated after consuming
     an embedded pragma (see the CPP_PRAGMA case below).  */
  bool save_error = parser->error;
  while (true)
    {
      c_token *token;

      /* Peek at the next token.  */
      token = c_parser_peek_token (parser);

      switch (token->type)
	{
	case CPP_EOF:
	  return;

	case CPP_PRAGMA_EOL:
	  if (parser->in_pragma)
	    return;
	  break;

	case CPP_SEMICOLON:
	  /* If the next token is a ';', we have reached the
	     end of the statement.  */
	  if (!nesting_depth)
	    {
	      /* Consume the ';'.  */
	      c_parser_consume_token (parser);
	      goto finished;
	    }
	  break;

	case CPP_CLOSE_BRACE:
	  /* If the next token is a non-nested '}', then we have
	     reached the end of the current block.  */
	  if (nesting_depth == 0 || --nesting_depth == 0)
	    {
	      c_parser_consume_token (parser);
	      goto finished;
	    }
	  break;

	case CPP_OPEN_BRACE:
	  /* If it the next token is a '{', then we are entering a new
	     block.  Consume the entire block.  */
	  ++nesting_depth;
	  break;

	case CPP_PRAGMA:
	  /* If we see a pragma, consume the whole thing at once.  We
	     have some safeguards against consuming pragmas willy-nilly.
	     Normally, we'd expect to be here with parser->error set,
	     which disables these safeguards.  But it's possible to get
	     here for secondary error recovery, after parser->error has
	     been cleared.  */
	  c_parser_consume_pragma (parser);
	  c_parser_skip_to_pragma_eol (parser);
	  parser->error = save_error;
	  continue;

	default:
	  break;
	}

      c_parser_consume_token (parser);
    }

 finished:
  parser->error = false;
}
/* CPP's options (initialized by c-opts.c). */
extern cpp_options *cpp_opts;
/* Save the warning flags which are controlled by __extension__.
   The flags are packed into a bitmask which the return value carries;
   pass it to restore_extension_diagnostics to undo.  */
static inline int
disable_extension_diagnostics (void)
{
  /* Bit layout: 0 pedantic, 1 warn_pointer_arith, 2 warn_traditional,
     3 flag_iso, 4 warn_long_long, 5 warn_cxx_compat,
     6 warn_overlength_strings, 7/8 warn_c90_c99_compat (== 1 / == -1),
     9/10 warn_c99_c11_compat (== 1 / == -1).  */
  int ret = (pedantic
	     | (warn_pointer_arith << 1)
	     | (warn_traditional << 2)
	     | (flag_iso << 3)
	     | (warn_long_long << 4)
	     | (warn_cxx_compat << 5)
	     | (warn_overlength_strings << 6)
	     /* warn_c90_c99_compat has three states: -1/0/1, so we must
		play tricks to properly restore it.  */
	     | ((warn_c90_c99_compat == 1) << 7)
	     | ((warn_c90_c99_compat == -1) << 8)
	     /* Similarly for warn_c99_c11_compat.  */
	     | ((warn_c99_c11_compat == 1) << 9)
	     | ((warn_c99_c11_compat == -1) << 10)
	     );
  /* Clear the flags, keeping the preprocessor's copies in sync where
     cpp_opts mirrors them.  */
  cpp_opts->cpp_pedantic = pedantic = 0;
  warn_pointer_arith = 0;
  cpp_opts->cpp_warn_traditional = warn_traditional = 0;
  flag_iso = 0;
  cpp_opts->cpp_warn_long_long = warn_long_long = 0;
  warn_cxx_compat = 0;
  warn_overlength_strings = 0;
  warn_c90_c99_compat = 0;
  warn_c99_c11_compat = 0;
  return ret;
}
/* Restore the warning flags which are controlled by __extension__.
   FLAGS is the return value from disable_extension_diagnostics;
   see that function for the bit layout.  */
static inline void
restore_extension_diagnostics (int flags)
{
  cpp_opts->cpp_pedantic = pedantic = flags & 1;
  warn_pointer_arith = (flags >> 1) & 1;
  cpp_opts->cpp_warn_traditional = warn_traditional = (flags >> 2) & 1;
  flag_iso = (flags >> 3) & 1;
  cpp_opts->cpp_warn_long_long = warn_long_long = (flags >> 4) & 1;
  warn_cxx_compat = (flags >> 5) & 1;
  warn_overlength_strings = (flags >> 6) & 1;
  /* See above for why is this needed: bits 7/8 and 9/10 encode the
     three-state (-1/0/1) flags.  */
  warn_c90_c99_compat = (flags >> 7) & 1 ? 1 : ((flags >> 8) & 1 ? -1 : 0);
  warn_c99_c11_compat = (flags >> 9) & 1 ? 1 : ((flags >> 10) & 1 ? -1 : 0);
}
/* Helper data structure for parsing #pragma acc routine, shared
   between the pragma handler and c_parser_declaration_or_fndef.  */
struct oacc_routine_data {
  bool error_seen; /* Set if error has been reported.  */
  bool fndecl_seen; /* Set if one fn decl/definition has been seen already.  */
  tree clauses;    /* Clauses parsed from the directive.  */
  location_t loc;  /* Location of the directive, for diagnostics.  */
};
static void c_parser_external_declaration (c_parser *);
static void c_parser_asm_definition (c_parser *);
static void c_parser_declaration_or_fndef (c_parser *, bool, bool, bool,
bool, bool, tree *, vec<c_token>,
struct oacc_routine_data * = NULL,
bool * = NULL);
static void c_parser_static_assert_declaration_no_semi (c_parser *);
static void c_parser_static_assert_declaration (c_parser *);
static struct c_typespec c_parser_enum_specifier (c_parser *);
static struct c_typespec c_parser_struct_or_union_specifier (c_parser *);
static tree c_parser_struct_declaration (c_parser *);
static struct c_typespec c_parser_typeof_specifier (c_parser *);
static tree c_parser_alignas_specifier (c_parser *);
static struct c_declarator *c_parser_direct_declarator (c_parser *, bool,
c_dtr_syn, bool *);
static struct c_declarator *c_parser_direct_declarator_inner (c_parser *,
bool,
struct c_declarator *);
static struct c_arg_info *c_parser_parms_declarator (c_parser *, bool, tree);
static struct c_arg_info *c_parser_parms_list_declarator (c_parser *, tree,
tree);
static struct c_parm *c_parser_parameter_declaration (c_parser *, tree);
static tree c_parser_simple_asm_expr (c_parser *);
static tree c_parser_attributes (c_parser *);
static struct c_expr c_parser_initializer (c_parser *);
static struct c_expr c_parser_braced_init (c_parser *, tree, bool,
struct obstack *);
static void c_parser_initelt (c_parser *, struct obstack *);
static void c_parser_initval (c_parser *, struct c_expr *,
struct obstack *);
static tree c_parser_compound_statement (c_parser *);
static void c_parser_compound_statement_nostart (c_parser *);
static void c_parser_label (c_parser *);
static void c_parser_statement (c_parser *, bool *, location_t * = NULL);
static void c_parser_statement_after_labels (c_parser *, bool *,
vec<tree> * = NULL);
static tree c_parser_c99_block_statement (c_parser *, bool *,
location_t * = NULL);
static void c_parser_if_statement (c_parser *, bool *, vec<tree> *);
static void c_parser_switch_statement (c_parser *, bool *);
static void c_parser_while_statement (c_parser *, bool, unsigned short, bool *);
static void c_parser_do_statement (c_parser *, bool, unsigned short);
static void c_parser_for_statement (c_parser *, bool, unsigned short, bool *);
static tree c_parser_asm_statement (c_parser *);
static tree c_parser_asm_operands (c_parser *);
static tree c_parser_asm_goto_operands (c_parser *);
static tree c_parser_asm_clobbers (c_parser *);
static struct c_expr c_parser_expr_no_commas (c_parser *, struct c_expr *,
tree = NULL_TREE);
static struct c_expr c_parser_conditional_expression (c_parser *,
struct c_expr *, tree);
static struct c_expr c_parser_binary_expression (c_parser *, struct c_expr *,
tree);
static struct c_expr c_parser_cast_expression (c_parser *, struct c_expr *);
static struct c_expr c_parser_unary_expression (c_parser *);
static struct c_expr c_parser_sizeof_expression (c_parser *);
static struct c_expr c_parser_alignof_expression (c_parser *);
static struct c_expr c_parser_postfix_expression (c_parser *);
static struct c_expr c_parser_postfix_expression_after_paren_type (c_parser *,
struct c_type_name *,
location_t);
static struct c_expr c_parser_postfix_expression_after_primary (c_parser *,
location_t loc,
struct c_expr);
static tree c_parser_transaction (c_parser *, enum rid);
static struct c_expr c_parser_transaction_expression (c_parser *, enum rid);
static tree c_parser_transaction_cancel (c_parser *);
static struct c_expr c_parser_expression (c_parser *);
static struct c_expr c_parser_expression_conv (c_parser *);
static vec<tree, va_gc> *c_parser_expr_list (c_parser *, bool, bool,
vec<tree, va_gc> **, location_t *,
tree *, vec<location_t> *,
unsigned int * = NULL);
static void c_parser_oacc_declare (c_parser *);
static void c_parser_oacc_enter_exit_data (c_parser *, bool);
static void c_parser_oacc_update (c_parser *);
static void c_parser_omp_construct (c_parser *, bool *);
static void c_parser_omp_threadprivate (c_parser *);
static void c_parser_omp_barrier (c_parser *);
static void c_parser_omp_flush (c_parser *);
static tree c_parser_omp_for_loop (location_t, c_parser *, enum tree_code,
tree, tree *, bool *);
static void c_parser_omp_taskwait (c_parser *);
static void c_parser_omp_taskyield (c_parser *);
static void c_parser_omp_cancel (c_parser *);
enum pragma_context { pragma_external, pragma_struct, pragma_param,
pragma_stmt, pragma_compound };
static bool c_parser_pragma (c_parser *, enum pragma_context, bool *);
static void c_parser_omp_cancellation_point (c_parser *, enum pragma_context);
static bool c_parser_omp_target (c_parser *, enum pragma_context, bool *);
static void c_parser_omp_end_declare_target (c_parser *);
static void c_parser_omp_declare (c_parser *, enum pragma_context);
static bool c_parser_omp_ordered (c_parser *, enum pragma_context, bool *);
static void c_parser_oacc_routine (c_parser *, enum pragma_context);
/* These Objective-C parser functions are only ever called when
compiling Objective-C. */
static void c_parser_objc_class_definition (c_parser *, tree);
static void c_parser_objc_class_instance_variables (c_parser *);
static void c_parser_objc_class_declaration (c_parser *);
static void c_parser_objc_alias_declaration (c_parser *);
static void c_parser_objc_protocol_definition (c_parser *, tree);
static bool c_parser_objc_method_type (c_parser *);
static void c_parser_objc_method_definition (c_parser *);
static void c_parser_objc_methodprotolist (c_parser *);
static void c_parser_objc_methodproto (c_parser *);
static tree c_parser_objc_method_decl (c_parser *, bool, tree *, tree *);
static tree c_parser_objc_type_name (c_parser *);
static tree c_parser_objc_protocol_refs (c_parser *);
static void c_parser_objc_try_catch_finally_statement (c_parser *);
static void c_parser_objc_synchronized_statement (c_parser *);
static tree c_parser_objc_selector (c_parser *);
static tree c_parser_objc_selector_arg (c_parser *);
static tree c_parser_objc_receiver (c_parser *);
static tree c_parser_objc_message_args (c_parser *);
static tree c_parser_objc_keywordexpr (c_parser *);
static void c_parser_objc_at_property_declaration (c_parser *);
static void c_parser_objc_at_synthesize_declaration (c_parser *);
static void c_parser_objc_at_dynamic_declaration (c_parser *);
static bool c_parser_objc_diagnose_bad_element_prefix
(c_parser *, struct c_declspecs *);
static void c_parser_parse_rtl_body (c_parser *parser, char *start_with_pass);
/* Parse a translation unit (C90 6.7, C99 6.9, C11 6.9).
   translation-unit:
     external-declarations
   external-declarations:
     external-declaration
     external-declarations external-declaration
   GNU extensions:
   translation-unit:
     empty
*/
static void
c_parser_translation_unit (c_parser *parser)
{
  if (c_parser_next_token_is (parser, CPP_EOF))
    {
      /* GNU C accepts an empty translation unit; ISO C does not.  */
      pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
	       "ISO C forbids an empty translation unit");
    }
  else
    {
      /* Release per-declaration obstack storage after each external
	 declaration is finished.  */
      void *obstack_position = obstack_alloc (&parser_obstack, 0);

      mark_valid_location_for_stdc_pragma (false);
      do
	{
	  ggc_collect ();
	  c_parser_external_declaration (parser);
	  obstack_free (&parser_obstack, obstack_position);
	}
      while (c_parser_next_token_is_not (parser, CPP_EOF));
    }

  /* Diagnose file-scope objects whose type was never completed.  */
  unsigned int i;
  tree decl;
  FOR_EACH_VEC_ELT (incomplete_record_decls, i, decl)
    if (DECL_SIZE (decl) == NULL_TREE && TREE_TYPE (decl) != error_mark_node)
      error ("storage size of %q+D isn%'t known", decl);
}
/* Parse an external declaration (C90 6.7, C99 6.9, C11 6.9).
   external-declaration:
     function-definition
     declaration
   GNU extensions:
   external-declaration:
     asm-definition
     ;
     __extension__ external-declaration
   Objective-C:
   external-declaration:
     objc-class-definition
     objc-class-declaration
     objc-alias-declaration
     objc-protocol-definition
     objc-method-definition
     @end
*/
static void
c_parser_external_declaration (c_parser *parser)
{
  int ext;
  /* Dispatch on the first token; anything unrecognized falls through
     to the declaration-or-function-definition path.  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_EXTENSION:
	  /* Parse the rest with extension diagnostics suppressed,
	     then restore them.  */
	  ext = disable_extension_diagnostics ();
	  c_parser_consume_token (parser);
	  c_parser_external_declaration (parser);
	  restore_extension_diagnostics (ext);
	  break;
	case RID_ASM:
	  c_parser_asm_definition (parser);
	  break;
	case RID_AT_INTERFACE:
	case RID_AT_IMPLEMENTATION:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_class_definition (parser, NULL_TREE);
	  break;
	case RID_AT_CLASS:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_class_declaration (parser);
	  break;
	case RID_AT_ALIAS:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_alias_declaration (parser);
	  break;
	case RID_AT_PROTOCOL:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_protocol_definition (parser, NULL_TREE);
	  break;
	case RID_AT_PROPERTY:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_at_property_declaration (parser);
	  break;
	case RID_AT_SYNTHESIZE:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_at_synthesize_declaration (parser);
	  break;
	case RID_AT_DYNAMIC:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_at_dynamic_declaration (parser);
	  break;
	case RID_AT_END:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  objc_finish_implementation ();
	  break;
	default:
	  goto decl_or_fndef;
	}
      break;
    case CPP_SEMICOLON:
      /* A stray ';' at file scope is a GNU extension.  */
      pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
	       "ISO C does not allow extra %<;%> outside of a function");
      c_parser_consume_token (parser);
      break;
    case CPP_PRAGMA:
      mark_valid_location_for_stdc_pragma (true);
      c_parser_pragma (parser, pragma_external, NULL);
      mark_valid_location_for_stdc_pragma (false);
      break;
    case CPP_PLUS:
    case CPP_MINUS:
      if (c_dialect_objc ())
	{
	  c_parser_objc_method_definition (parser);
	  break;
	}
      /* Else fall through, and yield a syntax error trying to parse
	 as a declaration or function definition.  */
      /* FALLTHRU */
    default:
    decl_or_fndef:
      /* A declaration or a function definition (or, in Objective-C,
	 an @interface or @protocol with prefix attributes).  We can
	 only tell which after parsing the declaration specifiers, if
	 any, and the first declarator.  */
      c_parser_declaration_or_fndef (parser, true, true, true, false, true,
				     NULL, vNULL);
      break;
    }
}
static void c_finish_omp_declare_simd (c_parser *, tree, tree, vec<c_token>);
static void c_finish_oacc_routine (struct oacc_routine_data *, tree, bool);
/* Build and add a DEBUG_BEGIN_STMT statement with location LOC.  */
static void
add_debug_begin_stmt (location_t loc)
{
  /* Don't add DEBUG_BEGIN_STMTs outside of functions, see PR84721.  */
  if (MAY_HAVE_DEBUG_MARKER_STMTS && building_stmt_list_p ())
    {
      tree stmt = build0 (DEBUG_BEGIN_STMT, void_type_node);
      SET_EXPR_LOCATION (stmt, loc);
      add_stmt (stmt);
    }
}
/* Parse a declaration or function definition (C90 6.5, 6.7.1, C99
6.7, 6.9.1, C11 6.7, 6.9.1). If FNDEF_OK is true, a function definition
is accepted; otherwise (old-style parameter declarations) only other
declarations are accepted. If STATIC_ASSERT_OK is true, a static
assertion is accepted; otherwise (old-style parameter declarations)
it is not. If NESTED is true, we are inside a function or parsing
old-style parameter declarations; any functions encountered are
nested functions and declaration specifiers are required; otherwise
we are at top level and functions are normal functions and
declaration specifiers may be optional. If EMPTY_OK is true, empty
declarations are OK (subject to all other constraints); otherwise
(old-style parameter declarations) they are diagnosed. If
START_ATTR_OK is true, the declaration specifiers may start with
attributes; otherwise they may not.
OBJC_FOREACH_OBJECT_DECLARATION can be used to get back the parsed
declaration when parsing an Objective-C foreach statement.
FALLTHRU_ATTR_P is used to signal whether this function parsed
"__attribute__((fallthrough));".
declaration:
declaration-specifiers init-declarator-list[opt] ;
static_assert-declaration
function-definition:
declaration-specifiers[opt] declarator declaration-list[opt]
compound-statement
declaration-list:
declaration
declaration-list declaration
init-declarator-list:
init-declarator
init-declarator-list , init-declarator
init-declarator:
declarator simple-asm-expr[opt] attributes[opt]
declarator simple-asm-expr[opt] attributes[opt] = initializer
GNU extensions:
nested-function-definition:
declaration-specifiers declarator declaration-list[opt]
compound-statement
attribute ;
Objective-C:
attributes objc-class-definition
attributes objc-category-definition
attributes objc-protocol-definition
The simple-asm-expr and attributes are GNU extensions.
This function does not handle __extension__; that is handled in its
callers. ??? Following the old parser, __extension__ may start
external declarations, declarations in functions and declarations
at the start of "for" loops, but not old-style parameter
declarations.
C99 requires declaration specifiers in a function definition; the
absence is diagnosed through the diagnosis of implicit int. In GNU
C we also allow but diagnose declarations without declaration
specifiers, but only at top level (elsewhere they conflict with
other syntax).
In Objective-C, declarations of the looping variable in a foreach
statement are exceptionally terminated by 'in' (for example, 'for
(NSObject *object in array) { ... }').
OpenMP:
declaration:
threadprivate-directive
GIMPLE:
gimple-function-definition:
declaration-specifiers[opt] __GIMPLE (gimple-or-rtl-pass-list) declarator
declaration-list[opt] compound-statement
rtl-function-definition:
declaration-specifiers[opt] __RTL (gimple-or-rtl-pass-list) declarator
declaration-list[opt] compound-statement */
static void
c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok,
			       bool static_assert_ok, bool empty_ok,
			       bool nested, bool start_attr_ok,
			       tree *objc_foreach_object_declaration,
			       vec<c_token> omp_declare_simd_clauses,
			       struct oacc_routine_data *oacc_routine_data,
			       bool *fallthru_attr_p)
{
  struct c_declspecs *specs;
  tree prefix_attrs;
  tree all_prefix_attrs;
  bool diagnosed_no_specs = false;
  location_t here = c_parser_peek_token (parser)->location;

  add_debug_begin_stmt (c_parser_peek_token (parser)->location);

  /* A _Static_assert is a complete declaration on its own; handle it
     first and return.  */
  if (static_assert_ok
      && c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT))
    {
      c_parser_static_assert_declaration (parser);
      return;
    }
  specs = build_null_declspecs ();

  /* Try to detect an unknown type name when we have "A B" or "A *B".  */
  if (c_parser_peek_token (parser)->type == CPP_NAME
      && c_parser_peek_token (parser)->id_kind == C_ID_ID
      && (c_parser_peek_2nd_token (parser)->type == CPP_NAME
	  || c_parser_peek_2nd_token (parser)->type == CPP_MULT)
      && (!nested || !lookup_name (c_parser_peek_token (parser)->value)))
    {
      tree name = c_parser_peek_token (parser)->value;

      /* Issue a warning about NAME being an unknown type name, perhaps
	 with some kind of hint.
	 If the user forgot a "struct" etc, suggest inserting
	 it.  Otherwise, attempt to look for misspellings.  */
      gcc_rich_location richloc (here);
      if (tag_exists_p (RECORD_TYPE, name))
	{
	  /* This is not C++ with its implicit typedef.  */
	  richloc.add_fixit_insert_before ("struct ");
	  error_at (&richloc,
		    "unknown type name %qE;"
		    " use %<struct%> keyword to refer to the type",
		    name);
	}
      else if (tag_exists_p (UNION_TYPE, name))
	{
	  richloc.add_fixit_insert_before ("union ");
	  error_at (&richloc,
		    "unknown type name %qE;"
		    " use %<union%> keyword to refer to the type",
		    name);
	}
      else if (tag_exists_p (ENUMERAL_TYPE, name))
	{
	  richloc.add_fixit_insert_before ("enum ");
	  error_at (&richloc,
		    "unknown type name %qE;"
		    " use %<enum%> keyword to refer to the type",
		    name);
	}
      else
	{
	  name_hint hint = lookup_name_fuzzy (name, FUZZY_LOOKUP_TYPENAME,
					      here);
	  if (hint)
	    {
	      richloc.add_fixit_replace (hint.suggestion ());
	      error_at (&richloc,
			"unknown type name %qE; did you mean %qs?",
			name, hint.suggestion ());
	    }
	  else
	    error_at (here, "unknown type name %qE", name);
	}

      /* Parse declspecs normally to get a correct pointer type, but avoid
	 a further "fails to be a type name" error.  Refuse nested functions
	 since it is not how the user likely wants us to recover.
	 The peeked token is rewritten in place to "void" so the rest of
	 the declaration parses with a well-formed (if wrong) type.  */
      c_parser_peek_token (parser)->type = CPP_KEYWORD;
      c_parser_peek_token (parser)->keyword = RID_VOID;
      c_parser_peek_token (parser)->value = error_mark_node;
      fndef_ok = !nested;
    }

  c_parser_declspecs (parser, specs, true, true, start_attr_ok,
		      true, true, cla_nonabstract_decl);
  if (parser->error)
    {
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  if (nested && !specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected declaration specifiers");
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }

  finish_declspecs (specs);
  bool auto_type_p = specs->typespec_word == cts_auto_type;

  /* Handle an empty declaration: "declspecs ;".  */
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      if (auto_type_p)
	error_at (here, "%<__auto_type%> in empty declaration");
      else if (specs->typespec_kind == ctsk_none
	       && attribute_fallthrough_p (specs->attrs))
	{
	  /* A lone __attribute__((fallthrough)); becomes an internal
	     IFN_FALLTHROUGH call statement.  */
	  if (fallthru_attr_p != NULL)
	    *fallthru_attr_p = true;
	  tree fn = build_call_expr_internal_loc (here, IFN_FALLTHROUGH,
						  void_type_node, 0);
	  add_stmt (fn);
	}
      else if (empty_ok)
	shadow_tag (specs);
      else
	{
	  shadow_tag_warned (specs, 1);
	  pedwarn (here, 0, "empty declaration");
	}
      c_parser_consume_token (parser);
      if (oacc_routine_data)
	c_finish_oacc_routine (oacc_routine_data, NULL_TREE, false);
      return;
    }

  /* Provide better error recovery.  Note that a type name here is usually
     better diagnosed as a redeclaration.  */
  if (empty_ok
      && specs->typespec_kind == ctsk_tagdef
      && c_parser_next_token_starts_declspecs (parser)
      && !c_parser_next_token_is (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected %<;%>, identifier or %<(%>");
      parser->error = false;
      shadow_tag_warned (specs, 1);
      return;
    }
  else if (c_dialect_objc () && !auto_type_p)
    {
      /* Prefix attributes are an error on method decls.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_PLUS:
	case CPP_MINUS:
	  if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
	    return;
	  if (specs->attrs)
	    {
	      warning_at (c_parser_peek_token (parser)->location,
			  OPT_Wattributes,
			  "prefix attributes are ignored for methods");
	      specs->attrs = NULL_TREE;
	    }
	  if (fndef_ok)
	    c_parser_objc_method_definition (parser);
	  else
	    c_parser_objc_methodproto (parser);
	  return;
	  break;
	default:
	  break;
	}
      /* This is where we parse 'attributes @interface ...',
	 'attributes @implementation ...', 'attributes @protocol ...'
	 (where attributes could be, for example, __attribute__
	 ((deprecated)).
      */
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_AT_INTERFACE:
	  {
	    if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
	      return;
	    c_parser_objc_class_definition (parser, specs->attrs);
	    return;
	  }
	  break;
	case RID_AT_IMPLEMENTATION:
	  {
	    if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
	      return;
	    if (specs->attrs)
	      {
		warning_at (c_parser_peek_token (parser)->location,
			    OPT_Wattributes,
			    "prefix attributes are ignored for implementations");
		specs->attrs = NULL_TREE;
	      }
	    c_parser_objc_class_definition (parser, NULL_TREE);
	    return;
	  }
	  break;
	case RID_AT_PROTOCOL:
	  {
	    if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
	      return;
	    c_parser_objc_protocol_definition (parser, specs->attrs);
	    return;
	  }
	  break;
	case RID_AT_ALIAS:
	case RID_AT_CLASS:
	case RID_AT_END:
	case RID_AT_PROPERTY:
	  if (specs->attrs)
	    {
	      c_parser_error (parser, "unexpected attribute");
	      specs->attrs = NULL;
	    }
	  break;
	default:
	  break;
	}
    }
  else if (attribute_fallthrough_p (specs->attrs))
    warning_at (here, OPT_Wattributes,
		"%<fallthrough%> attribute not followed by %<;%>");

  pending_xref_error ();
  prefix_attrs = specs->attrs;
  all_prefix_attrs = prefix_attrs;
  specs->attrs = NULL_TREE;

  /* Loop over the comma-separated declarators of one declaration, or
     fall through to a function definition after the first declarator.  */
  while (true)
    {
      struct c_declarator *declarator;
      bool dummy = false;
      timevar_id_t tv;
      tree fnbody = NULL_TREE;
      /* Declaring either one or more declarators (in which case we
	 should diagnose if there were no declaration specifiers) or a
	 function definition (in which case the diagnostic for
	 implicit int suffices).  */
      declarator = c_parser_declarator (parser,
					specs->typespec_kind != ctsk_none,
					C_DTR_NORMAL, &dummy);
      if (declarator == NULL)
	{
	  if (omp_declare_simd_clauses.exists ())
	    c_finish_omp_declare_simd (parser, NULL_TREE, NULL_TREE,
				       omp_declare_simd_clauses);
	  if (oacc_routine_data)
	    c_finish_oacc_routine (oacc_routine_data, NULL_TREE, false);
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  return;
	}
      if (auto_type_p && declarator->kind != cdk_id)
	{
	  error_at (here,
		    "%<__auto_type%> requires a plain identifier"
		    " as declarator");
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  return;
	}
      /* A token from this set after the declarator means this is a data
	 declaration, not a function definition.  */
      if (c_parser_next_token_is (parser, CPP_EQ)
	  || c_parser_next_token_is (parser, CPP_COMMA)
	  || c_parser_next_token_is (parser, CPP_SEMICOLON)
	  || c_parser_next_token_is_keyword (parser, RID_ASM)
	  || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)
	  || c_parser_next_token_is_keyword (parser, RID_IN))
	{
	  tree asm_name = NULL_TREE;
	  tree postfix_attrs = NULL_TREE;
	  if (!diagnosed_no_specs && !specs->declspecs_seen_p)
	    {
	      diagnosed_no_specs = true;
	      pedwarn (here, 0, "data definition has no type or storage class");
	    }
	  /* Having seen a data definition, there cannot now be a
	     function definition.  */
	  fndef_ok = false;
	  if (c_parser_next_token_is_keyword (parser, RID_ASM))
	    asm_name = c_parser_simple_asm_expr (parser);
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    {
	      postfix_attrs = c_parser_attributes (parser);
	      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
		{
		  /* This means there is an attribute specifier after
		     the declarator in a function definition.  Provide
		     some more information for the user.  */
		  error_at (here, "attributes should be specified before the "
			    "declarator in a function definition");
		  c_parser_skip_to_end_of_block_or_statement (parser);
		  return;
		}
	    }
	  if (c_parser_next_token_is (parser, CPP_EQ))
	    {
	      tree d;
	      struct c_expr init;
	      location_t init_loc;
	      c_parser_consume_token (parser);
	      if (auto_type_p)
		{
		  /* For __auto_type the initializer must be parsed
		     first so its type can be copied into SPECS before
		     the decl is started.  */
		  init_loc = c_parser_peek_token (parser)->location;
		  rich_location richloc (line_table, init_loc);
		  start_init (NULL_TREE, asm_name, global_bindings_p (), &richloc);
		  /* A parameter is initialized, which is invalid.  Don't
		     attempt to instrument the initializer.  */
		  int flag_sanitize_save = flag_sanitize;
		  if (nested && !empty_ok)
		    flag_sanitize = 0;
		  init = c_parser_expr_no_commas (parser, NULL);
		  flag_sanitize = flag_sanitize_save;
		  if (TREE_CODE (init.value) == COMPONENT_REF
		      && DECL_C_BIT_FIELD (TREE_OPERAND (init.value, 1)))
		    error_at (here,
			      "%<__auto_type%> used with a bit-field"
			      " initializer");
		  init = convert_lvalue_to_rvalue (init_loc, init, true, true);
		  tree init_type = TREE_TYPE (init.value);
		  /* As with typeof, remove all qualifiers from atomic types.  */
		  if (init_type != error_mark_node && TYPE_ATOMIC (init_type))
		    init_type
		      = c_build_qualified_type (init_type, TYPE_UNQUALIFIED);
		  bool vm_type = variably_modified_type_p (init_type,
							   NULL_TREE);
		  if (vm_type)
		    init.value = save_expr (init.value);
		  finish_init ();
		  /* Rewrite SPECS as if the deduced type had been written
		     with typeof.  */
		  specs->typespec_kind = ctsk_typeof;
		  specs->locations[cdw_typedef] = init_loc;
		  specs->typedef_p = true;
		  specs->type = init_type;
		  if (vm_type)
		    {
		      bool maybe_const = true;
		      tree type_expr = c_fully_fold (init.value, false,
						     &maybe_const);
		      specs->expr_const_operands &= maybe_const;
		      if (specs->expr)
			specs->expr = build2 (COMPOUND_EXPR,
					      TREE_TYPE (type_expr),
					      specs->expr, type_expr);
		      else
			specs->expr = type_expr;
		    }
		  d = start_decl (declarator, specs, true,
				  chainon (postfix_attrs, all_prefix_attrs));
		  if (!d)
		    d = error_mark_node;
		  if (omp_declare_simd_clauses.exists ())
		    c_finish_omp_declare_simd (parser, d, NULL_TREE,
					       omp_declare_simd_clauses);
		}
	      else
		{
		  /* The declaration of the variable is in effect while
		     its initializer is parsed.  */
		  d = start_decl (declarator, specs, true,
				  chainon (postfix_attrs, all_prefix_attrs));
		  if (!d)
		    d = error_mark_node;
		  if (omp_declare_simd_clauses.exists ())
		    c_finish_omp_declare_simd (parser, d, NULL_TREE,
					       omp_declare_simd_clauses);
		  init_loc = c_parser_peek_token (parser)->location;
		  rich_location richloc (line_table, init_loc);
		  start_init (d, asm_name, global_bindings_p (), &richloc);
		  /* A parameter is initialized, which is invalid.  Don't
		     attempt to instrument the initializer.  */
		  int flag_sanitize_save = flag_sanitize;
		  if (TREE_CODE (d) == PARM_DECL)
		    flag_sanitize = 0;
		  init = c_parser_initializer (parser);
		  flag_sanitize = flag_sanitize_save;
		  finish_init ();
		}
	      if (oacc_routine_data)
		c_finish_oacc_routine (oacc_routine_data, d, false);
	      if (d != error_mark_node)
		{
		  maybe_warn_string_init (init_loc, TREE_TYPE (d), init);
		  finish_decl (d, init_loc, init.value,
			       init.original_type, asm_name);
		}
	    }
	  else
	    {
	      /* No initializer.  */
	      if (auto_type_p)
		{
		  error_at (here,
			    "%<__auto_type%> requires an initialized "
			    "data declaration");
		  c_parser_skip_to_end_of_block_or_statement (parser);
		  return;
		}
	      tree d = start_decl (declarator, specs, false,
				   chainon (postfix_attrs,
					    all_prefix_attrs));
	      if (d && TREE_CODE (d) == FUNCTION_DECL)
		if (declarator->kind == cdk_function)
		  if (DECL_ARGUMENTS (d) == NULL_TREE)
		    DECL_ARGUMENTS (d) = declarator->u.arg_info->parms;
	      if (omp_declare_simd_clauses.exists ())
		{
		  tree parms = NULL_TREE;
		  if (d && TREE_CODE (d) == FUNCTION_DECL)
		    {
		      /* Walk down the declarator chain to find the
			 function declarator that carries the parms.  */
		      struct c_declarator *ce = declarator;
		      while (ce != NULL)
			if (ce->kind == cdk_function)
			  {
			    parms = ce->u.arg_info->parms;
			    break;
			  }
			else
			  ce = ce->declarator;
		    }
		  if (parms)
		    temp_store_parm_decls (d, parms);
		  c_finish_omp_declare_simd (parser, d, parms,
					     omp_declare_simd_clauses);
		  if (parms)
		    temp_pop_parm_decls ();
		}
	      if (oacc_routine_data)
		c_finish_oacc_routine (oacc_routine_data, d, false);
	      if (d)
		finish_decl (d, UNKNOWN_LOCATION, NULL_TREE,
			     NULL_TREE, asm_name);

	      if (c_parser_next_token_is_keyword (parser, RID_IN))
		{
		  /* Objective-C foreach: report the loop variable decl
		     back to the caller.  */
		  if (d)
		    *objc_foreach_object_declaration = d;
		  else
		    *objc_foreach_object_declaration = error_mark_node;
		}
	    }
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      if (auto_type_p)
		{
		  error_at (here,
			    "%<__auto_type%> may only be used with"
			    " a single declarator");
		  c_parser_skip_to_end_of_block_or_statement (parser);
		  return;
		}
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
		all_prefix_attrs = chainon (c_parser_attributes (parser),
					    prefix_attrs);
	      else
		all_prefix_attrs = prefix_attrs;
	      continue;
	    }
	  else if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      c_parser_consume_token (parser);
	      return;
	    }
	  else if (c_parser_next_token_is_keyword (parser, RID_IN))
	    {
	      /* This can only happen in Objective-C: we found the
		 'in' that terminates the declaration inside an
		 Objective-C foreach statement.  Do not consume the
		 token, so that the caller can use it to determine
		 that this indeed is a foreach context.  */
	      return;
	    }
	  else
	    {
	      c_parser_error (parser, "expected %<,%> or %<;%>");
	      c_parser_skip_to_end_of_block_or_statement (parser);
	      return;
	    }
	}
      else if (auto_type_p)
	{
	  error_at (here,
		    "%<__auto_type%> requires an initialized data declaration");
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  return;
	}
      else if (!fndef_ok)
	{
	  c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, "
			  "%<asm%> or %<__attribute__%>");
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  return;
	}
      /* Function definition (nested or otherwise).  */
      if (nested)
	{
	  pedwarn (here, OPT_Wpedantic, "ISO C forbids nested functions");
	  c_push_function_context ();
	}
      if (!start_function (specs, declarator, all_prefix_attrs))
	{
	  /* At this point we've consumed:
	       declaration-specifiers declarator
	     and the next token isn't CPP_EQ, CPP_COMMA, CPP_SEMICOLON,
	     RID_ASM, RID_ATTRIBUTE, or RID_IN,
	     but the
	       declaration-specifiers declarator
	     aren't grokkable as a function definition, so we have
	     an error.  */
	  gcc_assert (!c_parser_next_token_is (parser, CPP_SEMICOLON));
	  if (c_parser_next_token_starts_declspecs (parser))
	    {
	      /* If we have
		   declaration-specifiers declarator decl-specs
		 then assume we have a missing semicolon, which would
		 give us:
		   declaration-specifiers declarator decl-specs
						    ^
						    ;
		   <~~~~~~~~~ declaration ~~~~~~~~~~>
		 Use c_parser_require to get an error with a fix-it hint.  */
	      c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>");
	      parser->error = false;
	    }
	  else
	    {
	      /* This can appear in many cases looking nothing like a
		 function definition, so we don't give a more specific
		 error suggesting there was one.  */
	      c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, %<asm%> "
			      "or %<__attribute__%>");
	    }
	  if (nested)
	    c_pop_function_context ();
	  break;
	}

      if (DECL_DECLARED_INLINE_P (current_function_decl))
	tv = TV_PARSE_INLINE;
      else
	tv = TV_PARSE_FUNC;
      /* RAII timer: charges parsing time until AT goes out of scope.  */
      auto_timevar at (g_timer, tv);

      /* Parse old-style parameter declarations.  ??? Attributes are
	 not allowed to start declaration specifiers here because of a
	 syntax conflict between a function declaration with attribute
	 suffix and a function definition with an attribute prefix on
	 first old-style parameter declaration.  Following the old
	 parser, they are not accepted on subsequent old-style
	 parameter declarations either.  However, there is no
	 ambiguity after the first declaration, nor indeed on the
	 first as long as we don't allow postfix attributes after a
	 declarator with a nonempty identifier list in a definition;
	 and postfix attributes have never been accepted here in
	 function definitions either.  */
      while (c_parser_next_token_is_not (parser, CPP_EOF)
	     && c_parser_next_token_is_not (parser, CPP_OPEN_BRACE))
	c_parser_declaration_or_fndef (parser, false, false, false,
				       true, false, NULL, vNULL);
      store_parm_decls ();
      if (omp_declare_simd_clauses.exists ())
	c_finish_omp_declare_simd (parser, current_function_decl, NULL_TREE,
				   omp_declare_simd_clauses);
      if (oacc_routine_data)
	c_finish_oacc_routine (oacc_routine_data, current_function_decl, true);
      DECL_STRUCT_FUNCTION (current_function_decl)->function_start_locus
	= c_parser_peek_token (parser)->location;

      /* If the definition was marked with __GIMPLE then parse the
	 function body as GIMPLE.  */
      if (specs->gimple_p)
	{
	  cfun->pass_startwith = specs->gimple_or_rtl_pass;
	  bool saved = in_late_binary_op;
	  in_late_binary_op = true;
	  c_parser_parse_gimple_body (parser);
	  in_late_binary_op = saved;
	}
      /* Similarly, if it was marked with __RTL, use the RTL parser now,
	 consuming the function body.  */
      else if (specs->rtl_p)
	{
	  c_parser_parse_rtl_body (parser, specs->gimple_or_rtl_pass);
	  /* Normally, store_parm_decls sets next_is_function_body,
	     anticipating a function body.  We need a push_scope/pop_scope
	     pair to flush out this state, or subsequent function parsing
	     will go wrong.  */
	  push_scope ();
	  pop_scope ();
	  finish_function ();
	  return;
	}
      else
	fnbody = c_parser_compound_statement (parser);
      tree fndecl = current_function_decl;
      if (nested)
	{
	  tree decl = current_function_decl;
	  /* Mark nested functions as needing static-chain initially.
	     lower_nested_functions will recompute it but the
	     DECL_STATIC_CHAIN flag is also used before that happens,
	     by initializer_constant_valid_p.  See gcc.dg/nested-fn-2.c.  */
	  DECL_STATIC_CHAIN (decl) = 1;
	  add_stmt (fnbody);
	  finish_function ();
	  c_pop_function_context ();
	  add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl));
	}
      else
	{
	  if (fnbody)
	    add_stmt (fnbody);
	  finish_function ();
	}
      /* Get rid of the empty stmt list for GIMPLE.  */
      if (specs->gimple_p)
	DECL_SAVED_TREE (fndecl) = NULL_TREE;

      break;
    }
}
/* Parse an asm-definition (asm() outside a function body). This is a
GNU extension.
asm-definition:
simple-asm-expr ;
*/
static void
c_parser_asm_definition (c_parser *parser)
{
  /* Parse the simple-asm-expr; on success hand the string over to the
     symbol table for emission as a top-level asm, then require the
     terminating semicolon.  */
  tree str = c_parser_simple_asm_expr (parser);
  if (str != NULL_TREE)
    symtab->finalize_toplevel_asm (str);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* Parse a static assertion (C11 6.7.10).
static_assert-declaration:
static_assert-declaration-no-semi ;
*/
static void
c_parser_static_assert_declaration (c_parser *parser)
{
  /* Parse the assertion proper, then insist on a trailing semicolon;
     on any failure skip ahead for error recovery.  Note the short
     circuit: the semicolon is not demanded if parsing already failed.  */
  c_parser_static_assert_declaration_no_semi (parser);
  bool ok = (!parser->error
	     && c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"));
  if (!ok)
    c_parser_skip_to_end_of_block_or_statement (parser);
}
/* Parse a static assertion (C11 6.7.10), without the trailing
semicolon.
static_assert-declaration-no-semi:
_Static_assert ( constant-expression , string-literal )
*/
static void
c_parser_static_assert_declaration_no_semi (c_parser *parser)
{
  location_t assert_loc, value_loc;
  tree value;
  tree string;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT));
  assert_loc = c_parser_peek_token (parser)->location;
  /* _Static_assert is a C11 feature; pedwarn in earlier modes.  */
  if (flag_isoc99)
    pedwarn_c99 (assert_loc, OPT_Wpedantic,
		 "ISO C99 does not support %<_Static_assert%>");
  else
    pedwarn_c99 (assert_loc, OPT_Wpedantic,
		 "ISO C90 does not support %<_Static_assert%>");
  c_parser_consume_token (parser);
  matching_parens parens;
  if (!parens.require_open (parser))
    return;
  location_t value_tok_loc = c_parser_peek_token (parser)->location;
  value = c_parser_expr_no_commas (parser, NULL).value;
  value_loc = EXPR_LOC_OR_LOC (value, value_tok_loc);
  /* The message operand must be lexed as an untranslated string; the
     flag must be reset on every exit path below.  */
  parser->lex_untranslated_string = true;
  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
    {
      parser->lex_untranslated_string = false;
      return;
    }
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
      string = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      parser->lex_untranslated_string = false;
      break;
    default:
      c_parser_error (parser, "expected string literal");
      parser->lex_untranslated_string = false;
      return;
    }
  parens.require_close (parser);

  if (!INTEGRAL_TYPE_P (TREE_TYPE (value)))
    {
      error_at (value_loc, "expression in static assertion is not an integer");
      return;
    }
  if (TREE_CODE (value) != INTEGER_CST)
    {
      /* Fold and strip no-op conversions; if that yields a constant
	 the expression was valid GNU C but not a strict ISO constant
	 expression, so only pedwarn.  */
      value = c_fully_fold (value, false, NULL);
      /* Strip no-op conversions.  */
      STRIP_TYPE_NOPS (value);
      if (TREE_CODE (value) == INTEGER_CST)
	pedwarn (value_loc, OPT_Wpedantic, "expression in static assertion "
		 "is not an integer constant expression");
    }
  if (TREE_CODE (value) != INTEGER_CST)
    {
      error_at (value_loc, "expression in static assertion is not constant");
      return;
    }
  constant_expression_warning (value);
  if (integer_zerop (value))
    error_at (assert_loc, "static assertion failed: %E", string);
}
/* Parse some declaration specifiers (possibly none) (C90 6.5, C99
6.7, C11 6.7), adding them to SPECS (which may already include some).
Storage class specifiers are accepted iff SCSPEC_OK; type
specifiers are accepted iff TYPESPEC_OK; alignment specifiers are
accepted iff ALIGNSPEC_OK; attributes are accepted at the start
iff START_ATTR_OK; __auto_type is accepted iff AUTO_TYPE_OK.
declaration-specifiers:
storage-class-specifier declaration-specifiers[opt]
type-specifier declaration-specifiers[opt]
type-qualifier declaration-specifiers[opt]
function-specifier declaration-specifiers[opt]
alignment-specifier declaration-specifiers[opt]
Function specifiers (inline) are from C99, and are currently
handled as storage class specifiers, as is __thread. Alignment
specifiers are from C11.
C90 6.5.1, C99 6.7.1, C11 6.7.1:
storage-class-specifier:
typedef
extern
static
auto
register
_Thread_local
(_Thread_local is new in C11.)
C99 6.7.4, C11 6.7.4:
function-specifier:
inline
_Noreturn
(_Noreturn is new in C11.)
C90 6.5.2, C99 6.7.2, C11 6.7.2:
type-specifier:
void
char
short
int
long
float
double
signed
unsigned
_Bool
_Complex
[_Imaginary removed in C99 TC2]
struct-or-union-specifier
enum-specifier
typedef-name
atomic-type-specifier
(_Bool and _Complex are new in C99.)
(atomic-type-specifier is new in C11.)
C90 6.5.3, C99 6.7.3, C11 6.7.3:
type-qualifier:
const
restrict
volatile
address-space-qualifier
_Atomic
(restrict is new in C99.)
(_Atomic is new in C11.)
GNU extensions:
declaration-specifiers:
attributes declaration-specifiers[opt]
type-qualifier:
address-space
address-space:
identifier recognized by the target
storage-class-specifier:
__thread
type-specifier:
typeof-specifier
__auto_type
__intN
_Decimal32
_Decimal64
_Decimal128
_Fract
_Accum
_Sat
(_Fract, _Accum, and _Sat are new from ISO/IEC DTR 18037:
http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1169.pdf)
atomic-type-specifier
_Atomic ( type-name )
Objective-C:
type-specifier:
class-name objc-protocol-refs[opt]
typedef-name objc-protocol-refs
objc-protocol-refs
*/
void
c_parser_declspecs (c_parser *parser, struct c_declspecs *specs,
		    bool scspec_ok, bool typespec_ok, bool start_attr_ok,
		    bool alignspec_ok, bool auto_type_ok,
		    enum c_lookahead_kind la)
{
  bool attrs_ok = start_attr_ok;
  bool seen_type = specs->typespec_kind != ctsk_none;

  if (!typespec_ok)
    gcc_assert (la == cla_prefer_id);

  /* Consume tokens as long as they can begin a declaration specifier:
     identifiers, keywords, and (Objective-C) protocol references.  */
  while (c_parser_next_token_is (parser, CPP_NAME)
	 || c_parser_next_token_is (parser, CPP_KEYWORD)
	 || (c_dialect_objc () && c_parser_next_token_is (parser, CPP_LESS)))
    {
      struct c_typespec t;
      tree attrs;
      tree align;
      location_t loc = c_parser_peek_token (parser)->location;

      /* If we cannot accept a type, exit if the next token must start
	 one.  Also, if we already have seen a tagged definition,
	 a typename would be an error anyway and likely the user
	 has simply forgotten a semicolon, so we exit.  */
      if ((!typespec_ok || specs->typespec_kind == ctsk_tagdef)
	  && c_parser_next_tokens_start_typename (parser, la)
	  && !c_parser_next_token_is_qualifier (parser)
	  && !c_parser_next_token_is_keyword (parser, RID_ALIGNAS))
	break;

      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  c_token *name_token = c_parser_peek_token (parser);
	  tree value = name_token->value;
	  c_id_kind kind = name_token->id_kind;

	  /* Target-specific named address space qualifier.  */
	  if (kind == C_ID_ADDRSPACE)
	    {
	      addr_space_t as
		= name_token->keyword - RID_FIRST_ADDR_SPACE;
	      declspecs_add_addrspace (name_token->location, specs, as);
	      c_parser_consume_token (parser);
	      attrs_ok = true;
	      continue;
	    }

	  gcc_assert (!c_parser_next_token_is_qualifier (parser));

	  /* If we cannot accept a type, and the next token must start one,
	     exit.  Do the same if we already have seen a tagged definition,
	     since it would be an error anyway and likely the user has simply
	     forgotten a semicolon.  */
	  if (seen_type || !c_parser_next_tokens_start_typename (parser, la))
	    break;

	  /* Now at an unknown typename (C_ID_ID), a C_ID_TYPENAME or
	     a C_ID_CLASSNAME.  */
	  c_parser_consume_token (parser);
	  seen_type = true;
	  attrs_ok = true;
	  if (kind == C_ID_ID)
	    {
	      error_at (loc, "unknown type name %qE", value);
	      t.kind = ctsk_typedef;
	      t.spec = error_mark_node;
	    }
	  else if (kind == C_ID_TYPENAME
		   && (!c_dialect_objc ()
		       || c_parser_next_token_is_not (parser, CPP_LESS)))
	    {
	      t.kind = ctsk_typedef;
	      /* For a typedef name, record the meaning, not the name.
		 In case of 'foo foo, bar;'.  */
	      t.spec = lookup_name (value);
	    }
	  else
	    {
	      /* Objective-C class name, possibly protocol-qualified.  */
	      tree proto = NULL_TREE;
	      gcc_assert (c_dialect_objc ());
	      t.kind = ctsk_objc;
	      if (c_parser_next_token_is (parser, CPP_LESS))
		proto = c_parser_objc_protocol_refs (parser);
	      t.spec = objc_get_protocol_qualified_type (value, proto);
	    }
	  t.expr = NULL_TREE;
	  t.expr_const_operands = true;
	  declspecs_add_type (name_token->location, specs, t);
	  continue;
	}
      if (c_parser_next_token_is (parser, CPP_LESS))
	{
	  /* Make "<SomeProtocol>" equivalent to "id <SomeProtocol>" -
	     nisse@lysator.liu.se.  */
	  tree proto;
	  gcc_assert (c_dialect_objc ());
	  if (!typespec_ok || seen_type)
	    break;
	  proto = c_parser_objc_protocol_refs (parser);
	  t.kind = ctsk_objc;
	  t.spec = objc_get_protocol_qualified_type (NULL_TREE, proto);
	  t.expr = NULL_TREE;
	  t.expr_const_operands = true;
	  declspecs_add_type (loc, specs, t);
	  continue;
	}
      gcc_assert (c_parser_next_token_is (parser, CPP_KEYWORD));
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_STATIC:
	case RID_EXTERN:
	case RID_REGISTER:
	case RID_TYPEDEF:
	case RID_INLINE:
	case RID_NORETURN:
	case RID_AUTO:
	case RID_THREAD:
	  if (!scspec_ok)
	    goto out;
	  attrs_ok = true;
	  /* TODO: Distinguish between function specifiers (inline, noreturn)
	     and storage class specifiers, either here or in
	     declspecs_add_scspec.  */
	  declspecs_add_scspec (loc, specs,
				c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	  break;
	case RID_AUTO_TYPE:
	  if (!auto_type_ok)
	    goto out;
	  /* Fall through.  */
	case RID_UNSIGNED:
	case RID_LONG:
	case RID_SHORT:
	case RID_SIGNED:
	case RID_COMPLEX:
	case RID_INT:
	case RID_CHAR:
	case RID_FLOAT:
	case RID_DOUBLE:
	case RID_VOID:
	case RID_DFLOAT32:
	case RID_DFLOAT64:
	case RID_DFLOAT128:
	CASE_RID_FLOATN_NX:
	case RID_BOOL:
	case RID_FRACT:
	case RID_ACCUM:
	case RID_SAT:
	case RID_INT_N_0:
	case RID_INT_N_1:
	case RID_INT_N_2:
	case RID_INT_N_3:
	  if (!typespec_ok)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  if (c_dialect_objc ())
	    parser->objc_need_raw_identifier = true;
	  t.kind = ctsk_resword;
	  t.spec = c_parser_peek_token (parser)->value;
	  t.expr = NULL_TREE;
	  t.expr_const_operands = true;
	  declspecs_add_type (loc, specs, t);
	  c_parser_consume_token (parser);
	  break;
	case RID_ENUM:
	  if (!typespec_ok)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  t = c_parser_enum_specifier (parser);
	  invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, t.spec);
	  declspecs_add_type (loc, specs, t);
	  break;
	case RID_STRUCT:
	case RID_UNION:
	  if (!typespec_ok)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  t = c_parser_struct_or_union_specifier (parser);
	  invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, t.spec);
	  declspecs_add_type (loc, specs, t);
	  break;
	case RID_TYPEOF:
	  /* ??? The old parser rejected typeof after other type
	     specifiers, but is a syntax error the best way of
	     handling this?  */
	  if (!typespec_ok || seen_type)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  t = c_parser_typeof_specifier (parser);
	  declspecs_add_type (loc, specs, t);
	  break;
	case RID_ATOMIC:
	  /* C parser handling of Objective-C constructs needs
	     checking for correct lvalue-to-rvalue conversions, and
	     the code in build_modify_expr handling various
	     Objective-C cases, and that in build_unary_op handling
	     Objective-C cases for increment / decrement, also needs
	     updating; uses of TYPE_MAIN_VARIANT in objc_compare_types
	     and objc_types_are_equivalent may also need updates.  */
	  if (c_dialect_objc ())
	    sorry ("%<_Atomic%> in Objective-C");
	  if (flag_isoc99)
	    pedwarn_c99 (loc, OPT_Wpedantic,
			 "ISO C99 does not support the %<_Atomic%> qualifier");
	  else
	    pedwarn_c99 (loc, OPT_Wpedantic,
			 "ISO C90 does not support the %<_Atomic%> qualifier");
	  attrs_ok = true;
	  /* Declaration split from assignment so no initialization is
	     jumped over by the other case labels.  */
	  tree value;
	  value = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  if (typespec_ok && c_parser_next_token_is (parser, CPP_OPEN_PAREN))
	    {
	      /* _Atomic ( type-name ).  */
	      seen_type = true;
	      c_parser_consume_token (parser);
	      struct c_type_name *type = c_parser_type_name (parser);
	      t.kind = ctsk_typeof;
	      t.spec = error_mark_node;
	      t.expr = NULL_TREE;
	      t.expr_const_operands = true;
	      if (type != NULL)
		t.spec = groktypename (type, &t.expr,
				       &t.expr_const_operands);
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	      if (t.spec != error_mark_node)
		{
		  /* _Atomic may not qualify arrays, functions, or
		     already-qualified types.  */
		  if (TREE_CODE (t.spec) == ARRAY_TYPE)
		    error_at (loc, "%<_Atomic%>-qualified array type");
		  else if (TREE_CODE (t.spec) == FUNCTION_TYPE)
		    error_at (loc, "%<_Atomic%>-qualified function type");
		  else if (TYPE_QUALS (t.spec) != TYPE_UNQUALIFIED)
		    error_at (loc, "%<_Atomic%> applied to a qualified type");
		  else
		    t.spec = c_build_qualified_type (t.spec, TYPE_QUAL_ATOMIC);
		}
	      declspecs_add_type (loc, specs, t);
	    }
	  else
	    /* Plain _Atomic used as a qualifier.  */
	    declspecs_add_qual (loc, specs, value);
	  break;
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	  attrs_ok = true;
	  declspecs_add_qual (loc, specs, c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	  break;
	case RID_ATTRIBUTE:
	  if (!attrs_ok)
	    goto out;
	  attrs = c_parser_attributes (parser);
	  declspecs_add_attrs (loc, specs, attrs);
	  break;
	case RID_ALIGNAS:
	  if (!alignspec_ok)
	    goto out;
	  align = c_parser_alignas_specifier (parser);
	  declspecs_add_alignas (loc, specs, align);
	  break;
	case RID_GIMPLE:
	  if (! flag_gimple)
	    error_at (loc, "%<__GIMPLE%> only valid with -fgimple");
	  c_parser_consume_token (parser);
	  specs->gimple_p = true;
	  specs->locations[cdw_gimple] = loc;
	  specs->gimple_or_rtl_pass = c_parser_gimple_or_rtl_pass_list (parser);
	  break;
	case RID_RTL:
	  c_parser_consume_token (parser);
	  specs->rtl_p = true;
	  specs->locations[cdw_rtl] = loc;
	  specs->gimple_or_rtl_pass = c_parser_gimple_or_rtl_pass_list (parser);
	  break;
	default:
	  goto out;
	}
    }
 out: ;
}
/* Parse an enum specifier (C90 6.5.2.2, C99 6.7.2.2, C11 6.7.2.2).
enum-specifier:
enum attributes[opt] identifier[opt] { enumerator-list } attributes[opt]
enum attributes[opt] identifier[opt] { enumerator-list , } attributes[opt]
enum attributes[opt] identifier
The form with trailing comma is new in C99. The forms with
attributes are GNU extensions. In GNU C, we accept any expression
without commas in the syntax (assignment expressions, not just
conditional expressions); assignment expressions will be diagnosed
as non-constant.
enumerator-list:
enumerator
enumerator-list , enumerator
enumerator:
enumeration-constant
enumeration-constant = constant-expression
GNU Extensions:
enumerator:
enumeration-constant attributes[opt]
enumeration-constant attributes[opt] = constant-expression
*/
static struct c_typespec
c_parser_enum_specifier (c_parser *parser)
{
  struct c_typespec ret;
  tree attrs;
  tree ident = NULL_TREE;
  location_t enum_loc;
  location_t ident_loc = UNKNOWN_LOCATION;  /* Quiet warning. */
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ENUM));
  c_parser_consume_token (parser);
  /* Attributes between "enum" and the tag (a GNU extension).  */
  attrs = c_parser_attributes (parser);
  enum_loc = c_parser_peek_token (parser)->location;
  /* Set the location in case we create a decl now. */
  c_parser_set_source_position_from_token (c_parser_peek_token (parser));
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      ident = c_parser_peek_token (parser)->value;
      ident_loc = c_parser_peek_token (parser)->location;
      /* Prefer the tag's location for diagnostics about the enum.  */
      enum_loc = ident_loc;
      c_parser_consume_token (parser);
    }
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      /* Parse an enum definition. */
      struct c_enum_contents the_enum;
      tree type;
      tree postfix_attrs;
      /* We chain the enumerators in reverse order, then put them in
	 forward order at the end. */
      tree values;
      timevar_push (TV_PARSE_ENUM);
      type = start_enum (enum_loc, &the_enum, ident);
      values = NULL_TREE;
      c_parser_consume_token (parser);
      /* One iteration per enumerator; exits via break on '}', error,
	 or missing comma.  */
      while (true)
	{
	  tree enum_id;
	  tree enum_value;
	  tree enum_decl;
	  bool seen_comma;
	  c_token *token;
	  location_t comma_loc = UNKNOWN_LOCATION; /* Quiet warning. */
	  location_t decl_loc, value_loc;
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      /* Give a nicer error for "enum {}". */
	      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)
		  && !parser->error)
		{
		  error_at (c_parser_peek_token (parser)->location,
			    "empty enum is invalid");
		  parser->error = true;
		}
	      else
		c_parser_error (parser, "expected identifier");
	      /* Recover by skipping to the closing brace; mark the
		 enumerator list as erroneous.  */
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      values = error_mark_node;
	      break;
	    }
	  token = c_parser_peek_token (parser);
	  enum_id = token->value;
	  /* Set the location in case we create a decl now. */
	  c_parser_set_source_position_from_token (token);
	  decl_loc = value_loc = token->location;
	  c_parser_consume_token (parser);
	  /* Parse any specified attributes. */
	  tree enum_attrs = c_parser_attributes (parser);
	  if (c_parser_next_token_is (parser, CPP_EQ))
	    {
	      /* "= constant-expression": in GNU C any assignment
		 expression is accepted here (non-constant values are
		 diagnosed later).  */
	      c_parser_consume_token (parser);
	      value_loc = c_parser_peek_token (parser)->location;
	      enum_value = c_parser_expr_no_commas (parser, NULL).value;
	    }
	  else
	    enum_value = NULL_TREE;
	  enum_decl = build_enumerator (decl_loc, value_loc,
					&the_enum, enum_id, enum_value);
	  if (enum_attrs)
	    decl_attributes (&TREE_PURPOSE (enum_decl), enum_attrs, 0);
	  /* Chain onto the (reversed) list of enumerators.  */
	  TREE_CHAIN (enum_decl) = values;
	  values = enum_decl;
	  seen_comma = false;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      comma_loc = c_parser_peek_token (parser)->location;
	      seen_comma = true;
	      c_parser_consume_token (parser);
	    }
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      /* Trailing comma before '}' is C99; pedwarn for C90.  */
	      if (seen_comma)
		pedwarn_c90 (comma_loc, OPT_Wpedantic,
			     "comma at end of enumerator list");
	      c_parser_consume_token (parser);
	      break;
	    }
	  if (!seen_comma)
	    {
	      c_parser_error (parser, "expected %<,%> or %<}%>");
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      values = error_mark_node;
	      break;
	    }
	}
      /* Attributes after the closing brace (GNU extension).  */
      postfix_attrs = c_parser_attributes (parser);
      /* Restore source order of the enumerators before finishing.  */
      ret.spec = finish_enum (type, nreverse (values),
			      chainon (attrs, postfix_attrs));
      ret.kind = ctsk_tagdef;
      ret.expr = NULL_TREE;
      ret.expr_const_operands = true;
      timevar_pop (TV_PARSE_ENUM);
      return ret;
    }
  else if (!ident)
    {
      /* Neither a definition nor a tag reference: syntax error.  */
      c_parser_error (parser, "expected %<{%>");
      ret.spec = error_mark_node;
      ret.kind = ctsk_tagref;
      ret.expr = NULL_TREE;
      ret.expr_const_operands = true;
      return ret;
    }
  /* "enum identifier" with no braces: a reference to a (possibly
     incomplete) tag.  */
  ret = parser_xref_tag (ident_loc, ENUMERAL_TYPE, ident);
  /* In ISO C, enumerated types can be referred to only if already
     defined. */
  if (pedantic && !COMPLETE_TYPE_P (ret.spec))
    {
      gcc_assert (ident);
      pedwarn (enum_loc, OPT_Wpedantic,
	       "ISO C forbids forward references to %<enum%> types");
    }
  return ret;
}
/* Parse a struct or union specifier (C90 6.5.2.1, C99 6.7.2.1, C11 6.7.2.1).
struct-or-union-specifier:
struct-or-union attributes[opt] identifier[opt]
{ struct-contents } attributes[opt]
struct-or-union attributes[opt] identifier
struct-contents:
struct-declaration-list
struct-declaration-list:
struct-declaration ;
struct-declaration-list struct-declaration ;
GNU extensions:
struct-contents:
empty
struct-declaration
struct-declaration-list struct-declaration
struct-declaration-list:
struct-declaration-list ;
;
(Note that in the syntax here, unlike that in ISO C, the semicolons
are included here rather than in struct-declaration, in order to
describe the syntax with extra semicolons and missing semicolon at
end.)
Objective-C:
struct-declaration-list:
@defs ( class-name )
(Note this does not include a trailing semicolon, but can be
followed by further declarations, and gets a pedwarn-if-pedantic
when followed by a semicolon.) */
static struct c_typespec
c_parser_struct_or_union_specifier (c_parser *parser)
{
  struct c_typespec ret;
  tree attrs;
  tree ident = NULL_TREE;
  location_t struct_loc;
  location_t ident_loc = UNKNOWN_LOCATION;
  enum tree_code code;
  /* Map the keyword to the tree code for the tag being declared.  */
  switch (c_parser_peek_token (parser)->keyword)
    {
    case RID_STRUCT:
      code = RECORD_TYPE;
      break;
    case RID_UNION:
      code = UNION_TYPE;
      break;
    default:
      gcc_unreachable ();
    }
  struct_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  /* Attributes between the keyword and the tag (GNU extension).  */
  attrs = c_parser_attributes (parser);
  /* Set the location in case we create a decl now. */
  c_parser_set_source_position_from_token (c_parser_peek_token (parser));
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      ident = c_parser_peek_token (parser)->value;
      ident_loc = c_parser_peek_token (parser)->location;
      /* Prefer the tag's location for diagnostics.  */
      struct_loc = ident_loc;
      c_parser_consume_token (parser);
    }
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      /* Parse a struct or union definition.  Start the scope of the
	 tag before parsing components. */
      struct c_struct_parse_info *struct_info;
      tree type = start_struct (struct_loc, code, ident, &struct_info);
      tree postfix_attrs;
      /* We chain the components in reverse order, then put them in
	 forward order at the end.  Each struct-declaration may
	 declare multiple components (comma-separated), so we must use
	 chainon to join them, although when parsing each
	 struct-declaration we can use TREE_CHAIN directly.
	 The theory behind all this is that there will be more
	 semicolon separated fields than comma separated fields, and
	 so we'll be minimizing the number of node traversals required
	 by chainon. */
      tree contents;
      timevar_push (TV_PARSE_STRUCT);
      contents = NULL_TREE;
      c_parser_consume_token (parser);
      /* Handle the Objective-C @defs construct,
	 e.g. foo(sizeof(struct{ @defs(ClassName) }));. */
      if (c_parser_next_token_is_keyword (parser, RID_AT_DEFS))
	{
	  tree name;
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  matching_parens parens;
	  if (!parens.require_open (parser))
	    goto end_at_defs;
	  if (c_parser_next_token_is (parser, CPP_NAME)
	      && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME)
	    {
	      name = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      c_parser_error (parser, "expected class name");
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      goto end_at_defs;
	    }
	  parens.skip_until_found_close (parser);
	  /* Seed the member list with the class's instance variables
	     (reversed to match the reverse chaining used below).  */
	  contents = nreverse (objc_get_class_ivars (name));
	}
    end_at_defs:
      /* Parse the struct-declarations and semicolons.  Problems with
	 semicolons are diagnosed here; empty structures are diagnosed
	 elsewhere. */
      while (true)
	{
	  tree decls;
	  /* Parse any stray semicolon. */
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      location_t semicolon_loc
		= c_parser_peek_token (parser)->location;
	      gcc_rich_location richloc (semicolon_loc);
	      /* Offer a fix-it hint that removes the extra ';'.  */
	      richloc.add_fixit_remove ();
	      pedwarn (&richloc, OPT_Wpedantic,
		       "extra semicolon in struct or union specified");
	      c_parser_consume_token (parser);
	      continue;
	    }
	  /* Stop if at the end of the struct or union contents. */
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      c_parser_consume_token (parser);
	      break;
	    }
	  /* Accept #pragmas at struct scope. */
	  if (c_parser_next_token_is (parser, CPP_PRAGMA))
	    {
	      c_parser_pragma (parser, pragma_struct, NULL);
	      continue;
	    }
	  /* Parse some comma-separated declarations, but not the
	     trailing semicolon if any. */
	  decls = c_parser_struct_declaration (parser);
	  contents = chainon (decls, contents);
	  /* If no semicolon follows, either we have a parse error or
	     are at the end of the struct or union and should
	     pedwarn. */
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    c_parser_consume_token (parser);
	  else
	    {
	      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
		pedwarn (c_parser_peek_token (parser)->location, 0,
			 "no semicolon at end of struct or union");
	      else if (parser->error
		       || !c_parser_next_token_starts_declspecs (parser))
		{
		  c_parser_error (parser, "expected %<;%>");
		  c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
		  break;
		}
	      /* If we come here, we have already emitted an error
		 for an expected `;', identifier or `(', and we also
		 recovered already.  Go on with the next field. */
	    }
	}
      /* Attributes after the closing brace (GNU extension).  */
      postfix_attrs = c_parser_attributes (parser);
      /* Restore source order of the members before finishing.  */
      ret.spec = finish_struct (struct_loc, type, nreverse (contents),
				chainon (attrs, postfix_attrs), struct_info);
      ret.kind = ctsk_tagdef;
      ret.expr = NULL_TREE;
      ret.expr_const_operands = true;
      timevar_pop (TV_PARSE_STRUCT);
      return ret;
    }
  else if (!ident)
    {
      /* Neither a definition nor a tag reference: syntax error.  */
      c_parser_error (parser, "expected %<{%>");
      ret.spec = error_mark_node;
      ret.kind = ctsk_tagref;
      ret.expr = NULL_TREE;
      ret.expr_const_operands = true;
      return ret;
    }
  /* "struct/union identifier" with no braces: a tag reference.  */
  ret = parser_xref_tag (ident_loc, code, ident);
  return ret;
}
/* Parse a struct-declaration (C90 6.5.2.1, C99 6.7.2.1, C11 6.7.2.1),
*without* the trailing semicolon.
struct-declaration:
specifier-qualifier-list struct-declarator-list
static_assert-declaration-no-semi
specifier-qualifier-list:
type-specifier specifier-qualifier-list[opt]
type-qualifier specifier-qualifier-list[opt]
alignment-specifier specifier-qualifier-list[opt]
attributes specifier-qualifier-list[opt]
struct-declarator-list:
struct-declarator
struct-declarator-list , attributes[opt] struct-declarator
struct-declarator:
declarator attributes[opt]
declarator[opt] : constant-expression attributes[opt]
GNU extensions:
struct-declaration:
__extension__ struct-declaration
specifier-qualifier-list
Unlike the ISO C syntax, semicolons are handled elsewhere. The use
of attributes where shown is a GNU extension. In GNU C, we accept
any expression without commas in the syntax (assignment
expressions, not just conditional expressions); assignment
expressions will be diagnosed as non-constant. */
static tree
c_parser_struct_declaration (c_parser *parser)
{
  struct c_declspecs *specs;
  tree prefix_attrs;
  tree all_prefix_attrs;
  tree decls;
  location_t decl_loc;
  /* "__extension__ struct-declaration" (GNU): parse the inner
     declaration with extension diagnostics suppressed.  */
  if (c_parser_next_token_is_keyword (parser, RID_EXTENSION))
    {
      int ext;
      tree decl;
      ext = disable_extension_diagnostics ();
      c_parser_consume_token (parser);
      decl = c_parser_struct_declaration (parser);
      restore_extension_diagnostics (ext);
      return decl;
    }
  /* _Static_assert declares no members.  */
  if (c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT))
    {
      c_parser_static_assert_declaration_no_semi (parser);
      return NULL_TREE;
    }
  specs = build_null_declspecs ();
  decl_loc = c_parser_peek_token (parser)->location;
  /* Strictly by the standard, we shouldn't allow _Alignas here,
     but it appears to have been intended to allow it there, so
     we're keeping it as it is until WG14 reaches a conclusion
     of N1731.
     <http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1731.pdf>  */
  c_parser_declspecs (parser, specs, false, true, true,
		      true, false, cla_nonabstract_decl);
  if (parser->error)
    return NULL_TREE;
  if (!specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected specifier-qualifier-list");
      return NULL_TREE;
    }
  finish_declspecs (specs);
  /* A specifier-qualifier-list with no declarators: either a
     members-less declaration (pedwarn) or an anonymous struct/union
     member (MS extension handled via grokfield).  */
  if (c_parser_next_token_is (parser, CPP_SEMICOLON)
      || c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      tree ret;
      if (specs->typespec_kind == ctsk_none)
	{
	  pedwarn (decl_loc, OPT_Wpedantic,
		   "ISO C forbids member declarations with no members");
	  shadow_tag_warned (specs, pedantic);
	  ret = NULL_TREE;
	}
      else
	{
	  /* Support for unnamed structs or unions as members of
	     structs or unions (which is [a] useful and [b] supports
	     MS P-SDK). */
	  tree attrs = NULL;
	  ret = grokfield (c_parser_peek_token (parser)->location,
			   build_id_declarator (NULL_TREE), specs,
			   NULL_TREE, &attrs);
	  if (ret)
	    decl_attributes (&ret, attrs, 0);
	}
      return ret;
    }
  /* Provide better error recovery.  Note that a type name here is valid,
     and will be treated as a field name. */
  if (specs->typespec_kind == ctsk_tagdef
      && TREE_CODE (specs->type) != ENUMERAL_TYPE
      && c_parser_next_token_starts_declspecs (parser)
      && !c_parser_next_token_is (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected %<;%>, identifier or %<(%>");
      parser->error = false;
      return NULL_TREE;
    }
  pending_xref_error ();
  /* Save the prefix attributes; they apply to each declarator in the
     comma-separated list, freshly re-chained after each one.  */
  prefix_attrs = specs->attrs;
  all_prefix_attrs = prefix_attrs;
  specs->attrs = NULL_TREE;
  decls = NULL_TREE;
  while (true)
    {
      /* Declaring one or more declarators or un-named bit-fields. */
      struct c_declarator *declarator;
      bool dummy = false;
      if (c_parser_next_token_is (parser, CPP_COLON))
	/* Unnamed bit-field: ": width".  */
	declarator = build_id_declarator (NULL_TREE);
      else
	declarator = c_parser_declarator (parser,
					  specs->typespec_kind != ctsk_none,
					  C_DTR_NORMAL, &dummy);
      if (declarator == NULL)
	{
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  break;
	}
      if (c_parser_next_token_is (parser, CPP_COLON)
	  || c_parser_next_token_is (parser, CPP_COMMA)
	  || c_parser_next_token_is (parser, CPP_SEMICOLON)
	  || c_parser_next_token_is (parser, CPP_CLOSE_BRACE)
	  || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	{
	  tree postfix_attrs = NULL_TREE;
	  tree width = NULL_TREE;
	  tree d;
	  if (c_parser_next_token_is (parser, CPP_COLON))
	    {
	      /* Bit-field width expression.  */
	      c_parser_consume_token (parser);
	      width = c_parser_expr_no_commas (parser, NULL).value;
	    }
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    postfix_attrs = c_parser_attributes (parser);
	  d = grokfield (c_parser_peek_token (parser)->location,
			 declarator, specs, width, &all_prefix_attrs);
	  decl_attributes (&d, chainon (postfix_attrs,
					all_prefix_attrs), 0);
	  /* Chain the new field onto the (reversed) member list.  */
	  DECL_CHAIN (d) = decls;
	  decls = d;
	  /* Attributes after a comma apply to the following
	     declarator, combined with the prefix attributes.  */
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    all_prefix_attrs = chainon (c_parser_attributes (parser),
					prefix_attrs);
	  else
	    all_prefix_attrs = prefix_attrs;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else if (c_parser_next_token_is (parser, CPP_SEMICOLON)
		   || c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      /* Semicolon consumed in caller. */
	      break;
	    }
	  else
	    {
	      c_parser_error (parser, "expected %<,%>, %<;%> or %<}%>");
	      break;
	    }
	}
      else
	{
	  c_parser_error (parser,
			  "expected %<:%>, %<,%>, %<;%>, %<}%> or "
			  "%<__attribute__%>");
	  break;
	}
    }
  return decls;
}
/* Parse a typeof specifier (a GNU extension).
typeof-specifier:
typeof ( expression )
typeof ( type-name )
*/
static struct c_typespec
c_parser_typeof_specifier (c_parser *parser)
{
  struct c_typespec ret;
  /* Initialize to an error result so early returns are safe.  */
  ret.kind = ctsk_typeof;
  ret.spec = error_mark_node;
  ret.expr = NULL_TREE;
  ret.expr_const_operands = true;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_TYPEOF));
  c_parser_consume_token (parser);
  /* The operand of typeof is unevaluated; suppress warnings and note
     that we are inside typeof until the operand has been parsed.  */
  c_inhibit_evaluation_warnings++;
  in_typeof++;
  matching_parens parens;
  if (!parens.require_open (parser))
    {
      c_inhibit_evaluation_warnings--;
      in_typeof--;
      return ret;
    }
  if (c_parser_next_tokens_start_typename (parser, cla_prefer_id))
    {
      /* typeof ( type-name ).  */
      struct c_type_name *type = c_parser_type_name (parser);
      c_inhibit_evaluation_warnings--;
      in_typeof--;
      if (type != NULL)
	{
	  ret.spec = groktypename (type, &ret.expr, &ret.expr_const_operands);
	  pop_maybe_used (variably_modified_type_p (ret.spec, NULL_TREE));
	}
    }
  else
    {
      /* typeof ( expression ).  */
      bool was_vm;
      location_t here = c_parser_peek_token (parser)->location;
      struct c_expr expr = c_parser_expression (parser);
      c_inhibit_evaluation_warnings--;
      in_typeof--;
      if (TREE_CODE (expr.value) == COMPONENT_REF
	  && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1)))
	error_at (here, "%<typeof%> applied to a bit-field");
      mark_exp_read (expr.value);
      ret.spec = TREE_TYPE (expr.value);
      was_vm = variably_modified_type_p (ret.spec, NULL_TREE);
      /* This is returned with the type so that when the type is
	 evaluated, this can be evaluated. */
      if (was_vm)
	ret.expr = c_fully_fold (expr.value, false, &ret.expr_const_operands);
      pop_maybe_used (was_vm);
      /* For use in macros such as those in <stdatomic.h>, remove all
	 qualifiers from atomic types.  (const can be an issue for more macros
	 using typeof than just the <stdatomic.h> ones.) */
      if (ret.spec != error_mark_node && TYPE_ATOMIC (ret.spec))
	ret.spec = c_build_qualified_type (ret.spec, TYPE_UNQUALIFIED);
    }
  parens.skip_until_found_close (parser);
  return ret;
}
/* Parse an alignment-specifier.
C11 6.7.5:
alignment-specifier:
_Alignas ( type-name )
_Alignas ( constant-expression )
*/
static tree
c_parser_alignas_specifier (c_parser * parser)
{
  /* Default result if anything goes wrong below.  */
  tree alignment = error_mark_node;
  location_t alignas_loc = c_parser_peek_token (parser)->location;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNAS));
  c_parser_consume_token (parser);
  /* _Alignas is a C11 feature; pedwarn for earlier dialects.  */
  if (flag_isoc99)
    pedwarn_c99 (alignas_loc, OPT_Wpedantic,
		 "ISO C99 does not support %<_Alignas%>");
  else
    pedwarn_c99 (alignas_loc, OPT_Wpedantic,
		 "ISO C90 does not support %<_Alignas%>");
  matching_parens parens;
  if (!parens.require_open (parser))
    return alignment;
  /* The parenthesized operand is either a type-name, in which case the
     alignment of that type is used, or a constant expression giving the
     alignment directly.  */
  if (!c_parser_next_tokens_start_typename (parser, cla_prefer_id))
    alignment = c_parser_expr_no_commas (parser, NULL).value;
  else
    {
      struct c_type_name *tname = c_parser_type_name (parser);
      if (tname != NULL)
	alignment = c_sizeof_or_alignof_type (alignas_loc,
					      groktypename (tname, NULL, NULL),
					      false, true, 1);
    }
  parens.skip_until_found_close (parser);
  return alignment;
}
/* Parse a declarator, possibly an abstract declarator (C90 6.5.4,
6.5.5, C99 6.7.5, 6.7.6, C11 6.7.6, 6.7.7). If TYPE_SEEN_P then
a typedef name may be redeclared; otherwise it may not. KIND
indicates which kind of declarator is wanted. Returns a valid
declarator except in the case of a syntax error in which case NULL is
returned. *SEEN_ID is set to true if an identifier being declared is
seen; this is used to diagnose bad forms of abstract array declarators
and to determine whether an identifier list is syntactically permitted.
declarator:
pointer[opt] direct-declarator
direct-declarator:
identifier
( attributes[opt] declarator )
direct-declarator array-declarator
direct-declarator ( parameter-type-list )
direct-declarator ( identifier-list[opt] )
pointer:
* type-qualifier-list[opt]
* type-qualifier-list[opt] pointer
type-qualifier-list:
type-qualifier
attributes
type-qualifier-list type-qualifier
type-qualifier-list attributes
array-declarator:
[ type-qualifier-list[opt] assignment-expression[opt] ]
[ static type-qualifier-list[opt] assignment-expression ]
[ type-qualifier-list static assignment-expression ]
[ type-qualifier-list[opt] * ]
parameter-type-list:
parameter-list
parameter-list , ...
parameter-list:
parameter-declaration
parameter-list , parameter-declaration
parameter-declaration:
declaration-specifiers declarator attributes[opt]
declaration-specifiers abstract-declarator[opt] attributes[opt]
identifier-list:
identifier
identifier-list , identifier
abstract-declarator:
pointer
pointer[opt] direct-abstract-declarator
direct-abstract-declarator:
( attributes[opt] abstract-declarator )
direct-abstract-declarator[opt] array-declarator
direct-abstract-declarator[opt] ( parameter-type-list[opt] )
GNU extensions:
direct-declarator:
direct-declarator ( parameter-forward-declarations
parameter-type-list[opt] )
direct-abstract-declarator:
direct-abstract-declarator[opt] ( parameter-forward-declarations
parameter-type-list[opt] )
parameter-forward-declarations:
parameter-list ;
parameter-forward-declarations parameter-list ;
The uses of attributes shown above are GNU extensions.
Some forms of array declarator are not included in C99 in the
syntax for abstract declarators; these are disallowed elsewhere.
This may be a defect (DR#289).
This function also accepts an omitted abstract declarator as being
an abstract declarator, although not part of the formal syntax. */
struct c_declarator *
c_parser_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind,
		     bool *seen_id)
{
  /* Without a leading '*' this is a direct declarator, a direct
     abstract declarator, or nothing (which counts as a direct
     abstract declarator here).  */
  if (!c_parser_next_token_is (parser, CPP_MULT))
    return c_parser_direct_declarator (parser, type_seen_p, kind, seen_id);

  /* Consume one '*' together with any following qualifiers and
     attributes, recurse for the rest of the declarator, and wrap the
     result in a pointer declarator.  */
  struct c_declspecs *ptr_quals = build_null_declspecs ();
  c_parser_consume_token (parser);
  c_parser_declspecs (parser, ptr_quals, false, false, true,
		      false, false, cla_prefer_id);
  struct c_declarator *target
    = c_parser_declarator (parser, type_seen_p, kind, seen_id);
  if (target == NULL)
    return NULL;
  return make_pointer_declarator (ptr_quals, target);
}
/* Parse a direct declarator or direct abstract declarator; arguments
as c_parser_declarator. */
static struct c_declarator *
c_parser_direct_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind,
			    bool *seen_id)
{
  /* The direct declarator must start with an identifier (possibly
     omitted) or a parenthesized declarator (possibly abstract).  In
     an ordinary declarator, initial parentheses must start a
     parenthesized declarator.  In an abstract declarator or parameter
     declarator, they could start a parenthesized declarator or a
     parameter list.  To tell which, the open parenthesis and any
     following attributes must be read.  If a declaration specifier
     follows, then it is a parameter list; if the specifier is a
     typedef name, there might be an ambiguity about redeclaring it,
     which is resolved in the direction of treating it as a typedef
     name.  If a close parenthesis follows, it is also an empty
     parameter list, as the syntax does not permit empty abstract
     declarators.  Otherwise, it is a parenthesized declarator (in
     which case the analysis may be repeated inside it, recursively).
     ??? There is an ambiguity in a parameter declaration "int
     (__attribute__((foo)) x)", where x is not a typedef name: it
     could be an abstract declarator for a function, or declare x with
     parentheses.  The proper resolution of this ambiguity needs
     documenting.  At present we follow an accident of the old
     parser's implementation, whereby the first parameter must have
     some declaration specifiers other than just attributes.  Thus as
     a parameter declaration it is treated as a parenthesized
     parameter named x, and as an abstract declarator it is
     rejected.
     ??? Also following the old parser, attributes inside an empty
     parameter list are ignored, making it a list not yielding a
     prototype, rather than giving an error or making it have one
     parameter with implicit type int.
     ??? Also following the old parser, typedef names may be
     redeclared in declarators, but not Objective-C class names. */
  /* Case 1: the declared identifier itself.  Typedef/class names are
     accepted as the identifier only when TYPE_SEEN_P (they may be
     redeclared); a plain identifier is always accepted.  */
  if (kind != C_DTR_ABSTRACT
      && c_parser_next_token_is (parser, CPP_NAME)
      && ((type_seen_p
	   && (c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME
	       || c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME))
	  || c_parser_peek_token (parser)->id_kind == C_ID_ID))
    {
      struct c_declarator *inner
	= build_id_declarator (c_parser_peek_token (parser)->value);
      *seen_id = true;
      inner->id_loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      return c_parser_direct_declarator_inner (parser, *seen_id, inner);
    }

  /* Case 2: an abstract array declarator "[...]" with the identifier
     omitted (not valid in an ordinary declarator).  */
  if (kind != C_DTR_NORMAL
      && c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
    {
      struct c_declarator *inner = build_id_declarator (NULL_TREE);
      inner->id_loc = c_parser_peek_token (parser)->location;
      return c_parser_direct_declarator_inner (parser, *seen_id, inner);
    }

  /* Either we are at the end of an abstract declarator, or we have
     parentheses. */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      tree attrs;
      struct c_declarator *inner;
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      /* Disambiguate: if declaration specifiers (or ')') follow, this
	 is a parameter list, not a parenthesized declarator.  */
      if (kind != C_DTR_NORMAL
	  && (c_parser_next_token_starts_declspecs (parser)
	      || c_parser_next_token_is (parser, CPP_CLOSE_PAREN)))
	{
	  struct c_arg_info *args
	    = c_parser_parms_declarator (parser, kind == C_DTR_NORMAL,
					 attrs);
	  if (args == NULL)
	    return NULL;
	  else
	    {
	      inner
		= build_function_declarator (args,
					     build_id_declarator (NULL_TREE));
	      return c_parser_direct_declarator_inner (parser, *seen_id,
						       inner);
	    }
	}
      /* A parenthesized declarator. */
      inner = c_parser_declarator (parser, type_seen_p, kind, seen_id);
      if (inner != NULL && attrs != NULL)
	inner = build_attrs_declarator (attrs, inner);
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  c_parser_consume_token (parser);
	  if (inner == NULL)
	    return NULL;
	  else
	    return c_parser_direct_declarator_inner (parser, *seen_id, inner);
	}
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return NULL;
	}
    }
  else
    {
      /* No identifier and no parentheses: valid only as an omitted
	 abstract declarator.  */
      if (kind == C_DTR_NORMAL)
	{
	  c_parser_error (parser, "expected identifier or %<(%>");
	  return NULL;
	}
      else
	return build_id_declarator (NULL_TREE);
    }
}
/* Parse part of a direct declarator or direct abstract declarator,
given that some (in INNER) has already been parsed; ID_PRESENT is
true if an identifier is present, false for an abstract
declarator. */
static struct c_declarator *
c_parser_direct_declarator_inner (c_parser *parser, bool id_present,
				  struct c_declarator *inner)
{
  /* Parse a sequence of array declarators and parameter lists. */
  if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
    {
      location_t brace_loc = c_parser_peek_token (parser)->location;
      struct c_declarator *declarator;
      struct c_declspecs *quals_attrs = build_null_declspecs ();
      bool static_seen;
      bool star_seen;
      struct c_expr dimen;
      dimen.value = NULL_TREE;
      dimen.original_code = ERROR_MARK;
      dimen.original_type = NULL_TREE;
      c_parser_consume_token (parser);
      /* Qualifiers/attributes may appear before or after "static"
	 inside the brackets; parse both orders.  */
      c_parser_declspecs (parser, quals_attrs, false, false, true,
			  false, false, cla_prefer_id);
      static_seen = c_parser_next_token_is_keyword (parser, RID_STATIC);
      if (static_seen)
	c_parser_consume_token (parser);
      if (static_seen && !quals_attrs->declspecs_seen_p)
	c_parser_declspecs (parser, quals_attrs, false, false, true,
			    false, false, cla_prefer_id);
      if (!quals_attrs->declspecs_seen_p)
	quals_attrs = NULL;
      /* If "static" is present, there must be an array dimension.
	 Otherwise, there may be a dimension, "*", or no
	 dimension. */
      if (static_seen)
	{
	  star_seen = false;
	  dimen = c_parser_expr_no_commas (parser, NULL);
	}
      else
	{
	  if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
	    {
	      /* "[]": no dimension.  */
	      dimen.value = NULL_TREE;
	      star_seen = false;
	    }
	  else if (c_parser_next_token_is (parser, CPP_MULT))
	    {
	      /* "[*]" only if the '*' is immediately followed by ']';
		 otherwise the '*' starts a dimension expression.  */
	      if (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_SQUARE)
		{
		  dimen.value = NULL_TREE;
		  star_seen = true;
		  c_parser_consume_token (parser);
		}
	      else
		{
		  star_seen = false;
		  dimen = c_parser_expr_no_commas (parser, NULL);
		}
	    }
	  else
	    {
	      star_seen = false;
	      dimen = c_parser_expr_no_commas (parser, NULL);
	    }
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
	c_parser_consume_token (parser);
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  return NULL;
	}
      if (dimen.value)
	dimen = convert_lvalue_to_rvalue (brace_loc, dimen, true, true);
      declarator = build_array_declarator (brace_loc, dimen.value, quals_attrs,
					   static_seen, star_seen);
      if (declarator == NULL)
	return NULL;
      inner = set_array_declarator_inner (declarator, inner);
      /* Recurse: further array declarators or parameter lists may
	 follow.  */
      return c_parser_direct_declarator_inner (parser, id_present, inner);
    }
  else if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* A parameter list: this makes INNER a function declarator.  */
      tree attrs;
      struct c_arg_info *args;
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      args = c_parser_parms_declarator (parser, id_present, attrs);
      if (args == NULL)
	return NULL;
      else
	{
	  inner = build_function_declarator (args, inner);
	  return c_parser_direct_declarator_inner (parser, id_present, inner);
	}
    }
  /* Nothing more to parse: return the declarator built so far.  */
  return inner;
}
/* Parse a parameter list or identifier list, including the closing
parenthesis but not the opening one. ATTRS are the attributes at
the start of the list. ID_LIST_OK is true if an identifier list is
acceptable; such a list must not have attributes at the start. */
static struct c_arg_info *
c_parser_parms_declarator (c_parser *parser, bool id_list_ok, tree attrs)
{
  push_scope ();
  declare_parm_level ();
  /* If the list starts with an identifier, it is an identifier list.
     Otherwise, it is either a prototype list or an empty list. */
  if (id_list_ok
      && !attrs
      && c_parser_next_token_is (parser, CPP_NAME)
      && c_parser_peek_token (parser)->id_kind == C_ID_ID
      /* Look ahead to detect typos in type names. */
      && c_parser_peek_2nd_token (parser)->type != CPP_NAME
      && c_parser_peek_2nd_token (parser)->type != CPP_MULT
      && c_parser_peek_2nd_token (parser)->type != CPP_OPEN_PAREN
      && c_parser_peek_2nd_token (parser)->type != CPP_OPEN_SQUARE
      && c_parser_peek_2nd_token (parser)->type != CPP_KEYWORD)
    {
      /* Old-style (K&R) identifier list: collect the names into a
	 TREE_LIST, appending at the tail via NEXTP.  */
      tree list = NULL_TREE, *nextp = &list;
      while (c_parser_next_token_is (parser, CPP_NAME)
	     && c_parser_peek_token (parser)->id_kind == C_ID_ID)
	{
	  *nextp = build_tree_list (NULL_TREE,
				    c_parser_peek_token (parser)->value);
	  nextp = & TREE_CHAIN (*nextp);
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is_not (parser, CPP_COMMA))
	    break;
	  c_parser_consume_token (parser);
	  /* A trailing comma with no identifier after it is an
	     error.  */
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    {
	      c_parser_error (parser, "expected identifier");
	      break;
	    }
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  struct c_arg_info *ret = build_arg_info ();
	  ret->types = list;
	  c_parser_consume_token (parser);
	  pop_scope ();
	  return ret;
	}
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  pop_scope ();
	  return NULL;
	}
    }
  else
    {
      /* Prototype-style (or empty) parameter list.  */
      struct c_arg_info *ret = c_parser_parms_list_declarator (parser, attrs,
							       NULL);
      pop_scope ();
      return ret;
    }
}
/* Parse a parameter list (possibly empty), including the closing
parenthesis but not the opening one. ATTRS are the attributes at
the start of the list. EXPR is NULL or an expression that needs to
be evaluated for the side effects of array size expressions in the
parameters. */
static struct c_arg_info *
c_parser_parms_list_declarator (c_parser *parser, tree attrs, tree expr)
{
  bool bad_parm = false;
  /* ??? Following the old parser, forward parameter declarations may
     use abstract declarators, and if no real parameter declarations
     follow the forward declarations then this is not diagnosed.  Also
     note as above that attributes are ignored as the only contents of
     the parentheses, or as the only contents after forward
     declarations. */
  /* "()" — empty, unprototyped parameter list.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      struct c_arg_info *ret = build_arg_info ();
      c_parser_consume_token (parser);
      return ret;
    }
  /* "(...)" with no named parameters first.  */
  if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
    {
      struct c_arg_info *ret = build_arg_info ();
      if (flag_allow_parameterless_variadic_functions)
	{
	  /* F (...) is allowed. */
	  ret->types = NULL_TREE;
	}
      else
	{
	  /* Suppress -Wold-style-definition for this case. */
	  ret->types = error_mark_node;
	  error_at (c_parser_peek_token (parser)->location,
		    "ISO C requires a named argument before %<...%>");
	}
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  c_parser_consume_token (parser);
	  return ret;
	}
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return NULL;
	}
    }
  /* Nonempty list of parameters, either terminated with semicolon
     (forward declarations; recurse) or with close parenthesis (normal
     function) or with ", ... )" (variadic function). */
  while (true)
    {
      /* Parse a parameter. */
      struct c_parm *parm = c_parser_parameter_declaration (parser, attrs);
      /* The incoming attributes apply to the first parameter only.  */
      attrs = NULL_TREE;
      if (parm == NULL)
	bad_parm = true;
      else
	push_parm_decl (parm, &expr);
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  /* GNU forward parameter declarations: recurse for the real
	     parameter list, carrying EXPR (array-size side effects)
	     along.  */
	  tree new_attrs;
	  c_parser_consume_token (parser);
	  mark_forward_parm_decls ();
	  new_attrs = c_parser_attributes (parser);
	  return c_parser_parms_list_declarator (parser, new_attrs, expr);
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  c_parser_consume_token (parser);
	  if (bad_parm)
	    return NULL;
	  else
	    return get_parm_info (false, expr);
	}
      if (!c_parser_require (parser, CPP_COMMA,
			     "expected %<;%>, %<,%> or %<)%>",
			     UNKNOWN_LOCATION, false))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return NULL;
	}
      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	{
	  /* ", ...": must be immediately followed by ')'.  */
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    {
	      c_parser_consume_token (parser);
	      if (bad_parm)
		return NULL;
	      else
		return get_parm_info (true, expr);
	    }
	  else
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	      return NULL;
	    }
	}
    }
}
/* Parse a parameter declaration.  ATTRS are the attributes at the
   start of the declaration if it is the first parameter.
   Returns the parsed parameter, or NULL after emitting a diagnostic
   and skipping ahead when the parameter cannot be parsed.  */
static struct c_parm *
c_parser_parameter_declaration (c_parser *parser, tree attrs)
{
  struct c_declspecs *specs;
  struct c_declarator *declarator;
  tree prefix_attrs;
  tree postfix_attrs = NULL_TREE;
  bool dummy = false;
  /* Accept #pragmas between parameter declarations.  */
  while (c_parser_next_token_is (parser, CPP_PRAGMA))
    c_parser_pragma (parser, pragma_param, NULL);
  if (!c_parser_next_token_starts_declspecs (parser))
    {
      c_token *token = c_parser_peek_token (parser);
      if (parser->error)
	return NULL;
      c_parser_set_source_position_from_token (token);
      if (c_parser_next_tokens_start_typename (parser, cla_prefer_type))
	{
	  /* Looks like an undeclared type name: try fuzzy lookup so we
	     can offer a fix-it spelling suggestion in the error.  */
	  name_hint hint = lookup_name_fuzzy (token->value,
					      FUZZY_LOOKUP_TYPENAME,
					      token->location);
	  if (hint)
	    {
	      gcc_rich_location richloc (token->location);
	      richloc.add_fixit_replace (hint.suggestion ());
	      error_at (&richloc,
			"unknown type name %qE; did you mean %qs?",
			token->value, hint.suggestion ());
	    }
	  else
	    error_at (token->location, "unknown type name %qE", token->value);
	  parser->error = true;
	}
      /* ??? In some Objective-C cases '...' isn't applicable so there
	 should be a different message.  */
      else
	c_parser_error (parser,
			"expected declaration specifiers or %<...%>");
      c_parser_skip_to_end_of_parameter (parser);
      return NULL;
    }
  location_t start_loc = c_parser_peek_token (parser)->location;
  specs = build_null_declspecs ();
  if (attrs)
    {
      declspecs_add_attrs (input_location, specs, attrs);
      attrs = NULL_TREE;
    }
  c_parser_declspecs (parser, specs, true, true, true, true, false,
		      cla_nonabstract_decl);
  finish_declspecs (specs);
  pending_xref_error ();
  /* Detach attributes parsed with the specifiers; they are re-chained
     with any postfix attributes when building the parameter below.  */
  prefix_attrs = specs->attrs;
  specs->attrs = NULL_TREE;
  declarator = c_parser_declarator (parser,
				    specs->typespec_kind != ctsk_none,
				    C_DTR_PARM, &dummy);
  if (declarator == NULL)
    {
      c_parser_skip_until_found (parser, CPP_COMMA, NULL);
      return NULL;
    }
  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    postfix_attrs = c_parser_attributes (parser);
  /* Generate a location for the parameter, ranging from the start of the
     initial token to the end of the final token.
     If we have a identifier, then use it for the caret location, e.g.
       extern int callee (int one, int (*two)(int, int), float three);
                                   ~~~~~~^~~~~~~~~~~~~~
     otherwise, reuse the start location for the caret location e.g.:
       extern int callee (int one, int (*)(int, int), float three);
                                   ^~~~~~~~~~~~~~~~~
  */
  location_t end_loc = parser->last_token_location;
  /* Find any cdk_id declarator; determine if we have an identifier.  */
  c_declarator *id_declarator = declarator;
  while (id_declarator && id_declarator->kind != cdk_id)
    id_declarator = id_declarator->declarator;
  /* NOTE(review): id_declarator is dereferenced without a null check
     here; this assumes every declarator chain terminates in a cdk_id
     node -- confirm c_parser_declarator guarantees that invariant.  */
  location_t caret_loc = (id_declarator->u.id
			  ? id_declarator->id_loc
			  : start_loc);
  location_t param_loc = make_location (caret_loc, start_loc, end_loc);
  return build_c_parm (specs, chainon (postfix_attrs, prefix_attrs),
		       declarator, param_loc);
}
/* Parse a string literal in an asm expression.  It should not be
   translated, and wide string literals are an error although
   permitted by the syntax.  This is a GNU extension.
   asm-string-literal:
     string-literal
   ??? At present, following the old parser, the caller needs to have
   set lex_untranslated_string to 1.  It would be better to follow the
   C++ parser rather than using this kludge.  */
static tree
c_parser_asm_string_literal (c_parser *parser)
{
  /* Overlength-string warnings are not useful for asm text; suppress
     them while the literal is parsed and restore afterwards.  */
  int saved_warning = warn_overlength_strings;
  warn_overlength_strings = 0;
  tree result;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_STRING:
      result = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      break;
    case CPP_WSTRING:
      /* Diagnose, but consume the token and substitute an empty string
	 so that parsing can continue.  */
      error_at (c_parser_peek_token (parser)->location,
		"wide string literal in %<asm%>");
      result = build_string (1, "");
      c_parser_consume_token (parser);
      break;
    default:
      c_parser_error (parser, "expected string literal");
      result = NULL_TREE;
      break;
    }
  warn_overlength_strings = saved_warning;
  return result;
}
/* Parse a simple asm expression.  This is used in restricted
   contexts, where a full expression with inputs and outputs does not
   make sense.  This is a GNU extension.
   simple-asm-expr:
     asm ( asm-string-literal )
   Returns the string literal, or NULL_TREE on a parse error.  */
static tree
c_parser_simple_asm_expr (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM));
  /* ??? Follow the C++ parser rather than using the
     lex_untranslated_string kludge.  */
  parser->lex_untranslated_string = true;
  c_parser_consume_token (parser);
  matching_parens parens;
  if (!parens.require_open (parser))
    {
      parser->lex_untranslated_string = false;
      return NULL_TREE;
    }
  tree asm_str = c_parser_asm_string_literal (parser);
  parser->lex_untranslated_string = false;
  if (parens.require_close (parser))
    return asm_str;
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
  return NULL_TREE;
}
/* Return the identifier naming an attribute from the next token,
   without consuming it: an ordinary identifier, an identifier
   declared as a type, or one of a fixed set of reserved keywords
   (storage class specifiers, type specifiers and qualifiers).
   Return NULL_TREE when the next token cannot name an attribute.  */
static tree
c_parser_attribute_any_word (c_parser *parser)
{
  c_token *tok = c_parser_peek_token (parser);
  if (tok->type == CPP_NAME)
    return tok->value;
  if (tok->type != CPP_KEYWORD)
    return NULL_TREE;
  /* ??? See comment above about what keywords are accepted here.  */
  switch (tok->keyword)
    {
    case RID_STATIC:
    case RID_UNSIGNED:
    case RID_LONG:
    case RID_CONST:
    case RID_EXTERN:
    case RID_REGISTER:
    case RID_TYPEDEF:
    case RID_SHORT:
    case RID_INLINE:
    case RID_NORETURN:
    case RID_VOLATILE:
    case RID_SIGNED:
    case RID_AUTO:
    case RID_RESTRICT:
    case RID_COMPLEX:
    case RID_THREAD:
    case RID_INT:
    case RID_CHAR:
    case RID_FLOAT:
    case RID_DOUBLE:
    case RID_VOID:
    case RID_DFLOAT32:
    case RID_DFLOAT64:
    case RID_DFLOAT128:
    CASE_RID_FLOATN_NX:
    case RID_BOOL:
    case RID_FRACT:
    case RID_ACCUM:
    case RID_SAT:
    case RID_TRANSACTION_ATOMIC:
    case RID_TRANSACTION_CANCEL:
    case RID_ATOMIC:
    case RID_AUTO_TYPE:
    case RID_INT_N_0:
    case RID_INT_N_1:
    case RID_INT_N_2:
    case RID_INT_N_3:
      /* Accept __attribute__((__const)) as __attribute__((const)) etc.  */
      return ridpointers[(int) tok->keyword];
    default:
      return NULL_TREE;
    }
}
/* Parse (possibly empty) attributes.  This is a GNU extension.
   attributes:
     empty
     attributes attribute
   attribute:
     __attribute__ ( ( attribute-list ) )
   attribute-list:
     attrib
     attribute_list , attrib
   attrib:
     empty
     any-word
     any-word ( identifier )
     any-word ( identifier , nonempty-expr-list )
     any-word ( expr-list )
   where the "identifier" must not be declared as a type, and
   "any-word" may be any identifier (including one declared as a
   type), a reserved word storage class specifier, type specifier or
   type qualifier.  ??? This still leaves out most reserved keywords
   (following the old parser), shouldn't we include them, and why not
   allow identifiers declared as types to start the arguments?
   Returns the accumulated TREE_LIST of attributes; on malformed input
   the attributes parsed so far are returned after skipping to a
   closing parenthesis.  */
static tree
c_parser_attributes (c_parser *parser)
{
  tree attrs = NULL_TREE;
  while (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    {
      /* ??? Follow the C++ parser rather than using the
	 lex_untranslated_string kludge.  */
      parser->lex_untranslated_string = true;
      /* Consume the `__attribute__' keyword.  */
      c_parser_consume_token (parser);
      /* Look for the two `(' tokens.  */
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	{
	  parser->lex_untranslated_string = false;
	  return attrs;
	}
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	{
	  parser->lex_untranslated_string = false;
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return attrs;
	}
      /* Parse the attribute list.  */
      while (c_parser_next_token_is (parser, CPP_COMMA)
	     || c_parser_next_token_is (parser, CPP_NAME)
	     || c_parser_next_token_is (parser, CPP_KEYWORD))
	{
	  tree attr, attr_name, attr_args;
	  vec<tree, va_gc> *expr_list;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      /* An empty attrib; just skip the separating comma.  */
	      c_parser_consume_token (parser);
	      continue;
	    }
	  attr_name = c_parser_attribute_any_word (parser);
	  if (attr_name == NULL)
	    break;
	  attr_name = canonicalize_attr_name (attr_name);
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN))
	    {
	      /* Attribute without an argument list.  */
	      attr = build_tree_list (attr_name, NULL_TREE);
	      /* Add this attribute to the list.  */
	      attrs = chainon (attrs, attr);
	      /* If the next token isn't a comma, we're done.  */
	      if (!c_parser_next_token_is (parser, CPP_COMMA))
		break;
	      continue;
	    }
	  c_parser_consume_token (parser);
	  /* Parse the attribute contents.  If they start with an
	     identifier which is followed by a comma or close
	     parenthesis, then the arguments start with that
	     identifier; otherwise they are an expression list.
	     In objective-c the identifier may be a classname.  */
	  if (c_parser_next_token_is (parser, CPP_NAME)
	      && (c_parser_peek_token (parser)->id_kind == C_ID_ID
		  || (c_dialect_objc ()
		      && c_parser_peek_token (parser)->id_kind
		      == C_ID_CLASSNAME))
	      && ((c_parser_peek_2nd_token (parser)->type == CPP_COMMA)
		  || (c_parser_peek_2nd_token (parser)->type
		      == CPP_CLOSE_PAREN))
	      && (attribute_takes_identifier_p (attr_name)
		  || (c_dialect_objc ()
		      && c_parser_peek_token (parser)->id_kind
		      == C_ID_CLASSNAME)))
	    {
	      tree arg1 = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
		attr_args = build_tree_list (NULL_TREE, arg1);
	      else
		{
		  tree tree_list;
		  c_parser_consume_token (parser);
		  expr_list = c_parser_expr_list (parser, false, true,
						  NULL, NULL, NULL, NULL);
		  tree_list = build_tree_list_vec (expr_list);
		  attr_args = tree_cons (NULL_TREE, arg1, tree_list);
		  release_tree_vector (expr_list);
		}
	    }
	  else
	    {
	      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
		attr_args = NULL_TREE;
	      else
		{
		  expr_list = c_parser_expr_list (parser, false, true,
						  NULL, NULL, NULL, NULL);
		  attr_args = build_tree_list_vec (expr_list);
		  release_tree_vector (expr_list);
		}
	    }
	  attr = build_tree_list (attr_name, attr_args);
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    c_parser_consume_token (parser);
	  else
	    {
	      /* Reset the lexer flag before any early return so callers
		 see translated strings again.  */
	      parser->lex_untranslated_string = false;
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	      return attrs;
	    }
	  /* Add this attribute to the list.  */
	  attrs = chainon (attrs, attr);
	  /* If the next token isn't a comma, we're done.  */
	  if (!c_parser_next_token_is (parser, CPP_COMMA))
	    break;
	}
      /* Look for the two `)' tokens.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	c_parser_consume_token (parser);
      else
	{
	  parser->lex_untranslated_string = false;
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return attrs;
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	c_parser_consume_token (parser);
      else
	{
	  parser->lex_untranslated_string = false;
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return attrs;
	}
      parser->lex_untranslated_string = false;
    }
  return attrs;
}
/* Parse a type name (C90 6.5.5, C99 6.7.6, C11 6.7.7).  ALIGNAS_OK
   says whether alignment specifiers are OK (only in cases that might
   be the type name of a compound literal).
   type-name:
     specifier-qualifier-list abstract-declarator[opt]
   Returns the parsed type name, or NULL on a parse error.  */
struct c_type_name *
c_parser_type_name (c_parser *parser, bool alignas_ok)
{
  bool dummy = false;
  struct c_declspecs *specs = build_null_declspecs ();
  c_parser_declspecs (parser, specs, false, true, true, alignas_ok, false,
		      cla_prefer_type);
  if (!specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected specifier-qualifier-list");
      return NULL;
    }
  if (specs->type != error_mark_node)
    {
      pending_xref_error ();
      finish_declspecs (specs);
    }
  struct c_declarator *decl
    = c_parser_declarator (parser, specs->typespec_kind != ctsk_none,
			   C_DTR_ABSTRACT, &dummy);
  if (!decl)
    return NULL;
  struct c_type_name *result = XOBNEW (&parser_obstack, struct c_type_name);
  result->specs = specs;
  result->declarator = decl;
  return result;
}
/* Parse an initializer (C90 6.5.7, C99 6.7.8, C11 6.7.9).
   initializer:
     assignment-expression
     { initializer-list }
     { initializer-list , }
   initializer-list:
     designation[opt] initializer
     initializer-list , designation[opt] initializer
   designation:
     designator-list =
   designator-list:
     designator
     designator-list designator
   designator:
     array-designator
     . identifier
   array-designator:
     [ constant-expression ]
   GNU extensions:
   initializer:
     { }
   designation:
     array-designator
     identifier :
   array-designator:
     [ constant-expression ... constant-expression ]
   Any expression without commas is accepted in the syntax for the
   constant-expressions, with non-constant expressions rejected later.
   This function is only used for top-level initializers; for nested
   ones, see c_parser_initval.  */
static struct c_expr
c_parser_initializer (c_parser *parser)
{
  /* A braced initializer list is handled wholesale.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    return c_parser_braced_init (parser, NULL_TREE, false, NULL);
  location_t expr_loc = c_parser_peek_token (parser)->location;
  struct c_expr init = c_parser_expr_no_commas (parser, NULL);
  /* String literals and compound literals keep their lvalue form;
     anything else is converted to an rvalue.  */
  enum tree_code code = TREE_CODE (init.value);
  if (code != STRING_CST && code != COMPOUND_LITERAL_EXPR)
    init = convert_lvalue_to_rvalue (expr_loc, init, true, true);
  return init;
}
/* The location of the last comma within the current initializer list,
   or UNKNOWN_LOCATION if not within one.  Set by c_parser_braced_init
   and used as the close location when popping an unterminated braced
   list on the error path.  */
location_t last_init_list_comma;
/* Parse a braced initializer list.  TYPE is the type specified for a
   compound literal, and NULL_TREE for other initializers and for
   nested braced lists.  NESTED_P is true for nested braced lists,
   false for the list of a compound literal or the list that is the
   top-level initializer in a declaration.  OUTER_OBSTACK is the
   enclosing list's obstack when NESTED_P, and is unused otherwise.
   Returns the initializer expression, or an error expression when the
   closing brace is missing.  */
static struct c_expr
c_parser_braced_init (c_parser *parser, tree type, bool nested_p,
		      struct obstack *outer_obstack)
{
  struct c_expr ret;
  struct obstack braced_init_obstack;
  location_t brace_loc = c_parser_peek_token (parser)->location;
  gcc_obstack_init (&braced_init_obstack);
  gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE));
  matching_braces braces;
  braces.consume_open (parser);
  if (nested_p)
    {
      /* For a nested list, first complete any implicit initialization
	 pending in the enclosing list, then open a new init level.  */
      finish_implicit_inits (brace_loc, outer_obstack);
      push_init_level (brace_loc, 0, &braced_init_obstack);
    }
  else
    really_start_incremental_init (type);
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      /* GNU extension: {} -- diagnosed only with -Wpedantic.  */
      pedwarn (brace_loc, OPT_Wpedantic, "ISO C forbids empty initializer braces");
    }
  else
    {
      /* Parse a non-empty initializer list, possibly with a trailing
	 comma.  */
      while (true)
	{
	  c_parser_initelt (parser, &braced_init_obstack);
	  if (parser->error)
	    break;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      last_init_list_comma = c_parser_peek_token (parser)->location;
	      c_parser_consume_token (parser);
	    }
	  else
	    break;
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    break;
	}
    }
  c_token *next_tok = c_parser_peek_token (parser);
  if (next_tok->type != CPP_CLOSE_BRACE)
    {
      /* Missing close brace: still pop the init level and free the
	 obstack before returning an error expression.  */
      ret.set_error ();
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      braces.skip_until_found_close (parser);
      pop_init_level (brace_loc, 0, &braced_init_obstack, last_init_list_comma);
      obstack_free (&braced_init_obstack, NULL);
      return ret;
    }
  location_t close_loc = next_tok->location;
  c_parser_consume_token (parser);
  ret = pop_init_level (brace_loc, 0, &braced_init_obstack, close_loc);
  obstack_free (&braced_init_obstack, NULL);
  set_c_expr_source_range (&ret, brace_loc, close_loc);
  return ret;
}
/* Parse a nested initializer, including designators, and process it
   into the current initializer level via BRACED_INIT_OBSTACK.  */
static void
c_parser_initelt (c_parser *parser, struct obstack * braced_init_obstack)
{
  /* Parse any designator or designator list.  A single array
     designator may have the subsequent "=" omitted in GNU C, but a
     longer list or a structure member designator may not.  */
  if (c_parser_next_token_is (parser, CPP_NAME)
      && c_parser_peek_2nd_token (parser)->type == CPP_COLON)
    {
      /* Old-style structure member designator.  */
      set_init_label (c_parser_peek_token (parser)->location,
		      c_parser_peek_token (parser)->value,
		      c_parser_peek_token (parser)->location,
		      braced_init_obstack);
      /* Use the colon as the error location.  */
      pedwarn (c_parser_peek_2nd_token (parser)->location, OPT_Wpedantic,
	       "obsolete use of designated initializer with %<:%>");
      c_parser_consume_token (parser);
      c_parser_consume_token (parser);
    }
  else
    {
      /* des_seen is 0 if there have been no designators, 1 if there
	 has been a single array designator and 2 otherwise.  */
      int des_seen = 0;
      /* Location of a designator.  */
      location_t des_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
      while (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)
	     || c_parser_next_token_is (parser, CPP_DOT))
	{
	  int des_prev = des_seen;
	  if (!des_seen)
	    des_loc = c_parser_peek_token (parser)->location;
	  if (des_seen < 2)
	    des_seen++;
	  if (c_parser_next_token_is (parser, CPP_DOT))
	    {
	      /* Member designator: ".identifier".  */
	      des_seen = 2;
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is (parser, CPP_NAME))
		{
		  set_init_label (des_loc, c_parser_peek_token (parser)->value,
				  c_parser_peek_token (parser)->location,
				  braced_init_obstack);
		  c_parser_consume_token (parser);
		}
	      else
		{
		  /* Malformed designator: record an error element so the
		     init machinery stays consistent, then bail out.  */
		  struct c_expr init;
		  init.set_error ();
		  init.original_code = ERROR_MARK;
		  init.original_type = NULL;
		  c_parser_error (parser, "expected identifier");
		  c_parser_skip_until_found (parser, CPP_COMMA, NULL);
		  process_init_element (input_location, init, false,
					braced_init_obstack);
		  return;
		}
	    }
	  else
	    {
	      tree first, second;
	      location_t ellipsis_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
	      location_t array_index_loc = UNKNOWN_LOCATION;
	      /* ??? Following the old parser, [ objc-receiver
		 objc-message-args ] is accepted as an initializer,
		 being distinguished from a designator by what follows
		 the first assignment expression inside the square
		 brackets, but after a first array designator a
		 subsequent square bracket is for Objective-C taken to
		 start an expression, using the obsolete form of
		 designated initializer without '=', rather than
		 possibly being a second level of designation: in LALR
		 terms, the '[' is shifted rather than reducing
		 designator to designator-list.  */
	      if (des_prev == 1 && c_dialect_objc ())
		{
		  des_seen = des_prev;
		  break;
		}
	      if (des_prev == 0 && c_dialect_objc ())
		{
		  /* This might be an array designator or an
		     Objective-C message expression.  If the former,
		     continue parsing here; if the latter, parse the
		     remainder of the initializer given the starting
		     primary-expression.  ??? It might make sense to
		     distinguish when des_prev == 1 as well; see
		     previous comment.  */
		  tree rec, args;
		  struct c_expr mexpr;
		  c_parser_consume_token (parser);
		  if (c_parser_peek_token (parser)->type == CPP_NAME
		      && ((c_parser_peek_token (parser)->id_kind
			   == C_ID_TYPENAME)
			  || (c_parser_peek_token (parser)->id_kind
			      == C_ID_CLASSNAME)))
		    {
		      /* Type name receiver.  */
		      tree id = c_parser_peek_token (parser)->value;
		      c_parser_consume_token (parser);
		      rec = objc_get_class_reference (id);
		      goto parse_message_args;
		    }
		  first = c_parser_expr_no_commas (parser, NULL).value;
		  mark_exp_read (first);
		  if (c_parser_next_token_is (parser, CPP_ELLIPSIS)
		      || c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
		    goto array_desig_after_first;
		  /* Expression receiver.  So far only one part
		     without commas has been parsed; there might be
		     more of the expression.  */
		  rec = first;
		  while (c_parser_next_token_is (parser, CPP_COMMA))
		    {
		      struct c_expr next;
		      location_t comma_loc, exp_loc;
		      comma_loc = c_parser_peek_token (parser)->location;
		      c_parser_consume_token (parser);
		      exp_loc = c_parser_peek_token (parser)->location;
		      next = c_parser_expr_no_commas (parser, NULL);
		      next = convert_lvalue_to_rvalue (exp_loc, next,
						       true, true);
		      rec = build_compound_expr (comma_loc, rec, next.value);
		    }
		parse_message_args:
		  /* Now parse the objc-message-args.  */
		  args = c_parser_objc_message_args (parser);
		  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
					     "expected %<]%>");
		  mexpr.value
		    = objc_build_message_expr (rec, args);
		  mexpr.original_code = ERROR_MARK;
		  mexpr.original_type = NULL;
		  /* Now parse and process the remainder of the
		     initializer, starting with this message
		     expression as a primary-expression.  */
		  c_parser_initval (parser, &mexpr, braced_init_obstack);
		  return;
		}
	      /* Plain array designator: "[ expr ]" or the GNU range
		 form "[ expr ... expr ]".  */
	      c_parser_consume_token (parser);
	      array_index_loc = c_parser_peek_token (parser)->location;
	      first = c_parser_expr_no_commas (parser, NULL).value;
	      mark_exp_read (first);
	    array_desig_after_first:
	      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
		{
		  ellipsis_loc = c_parser_peek_token (parser)->location;
		  c_parser_consume_token (parser);
		  second = c_parser_expr_no_commas (parser, NULL).value;
		  mark_exp_read (second);
		}
	      else
		second = NULL_TREE;
	      if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
		{
		  c_parser_consume_token (parser);
		  set_init_index (array_index_loc, first, second,
				  braced_init_obstack);
		  if (second)
		    pedwarn (ellipsis_loc, OPT_Wpedantic,
			     "ISO C forbids specifying range of elements to initialize");
		}
	      else
		c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
					   "expected %<]%>");
	    }
	}
      if (des_seen >= 1)
	{
	  if (c_parser_next_token_is (parser, CPP_EQ))
	    {
	      pedwarn_c90 (des_loc, OPT_Wpedantic,
			   "ISO C90 forbids specifying subobject "
			   "to initialize");
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      /* GNU C allows the "=" to be omitted only after a single
		 array designator.  */
	      if (des_seen == 1)
		pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
			 "obsolete use of designated initializer without %<=%>");
	      else
		{
		  struct c_expr init;
		  init.set_error ();
		  init.original_code = ERROR_MARK;
		  init.original_type = NULL;
		  c_parser_error (parser, "expected %<=%>");
		  c_parser_skip_until_found (parser, CPP_COMMA, NULL);
		  process_init_element (input_location, init, false,
					braced_init_obstack);
		  return;
		}
	    }
	}
    }
  c_parser_initval (parser, NULL, braced_init_obstack);
}
/* Parse a nested initializer; as c_parser_initializer but parses
   initializers within braced lists, after any designators have been
   applied.  If AFTER is not NULL then it is an Objective-C message
   expression which is the primary-expression starting the
   initializer.  */
static void
c_parser_initval (c_parser *parser, struct c_expr *after,
		  struct obstack * braced_init_obstack)
{
  gcc_assert (!after || c_dialect_objc ());
  location_t init_loc = c_parser_peek_token (parser)->location;
  struct c_expr value;
  if (after == NULL && c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    value = c_parser_braced_init (parser, NULL_TREE, true,
				  braced_init_obstack);
  else
    {
      value = c_parser_expr_no_commas (parser, after);
      /* String literals and compound literals stay as they are; any
	 other non-null value is converted to an rvalue.  */
      bool keep_as_is = (value.value == NULL_TREE
			 || TREE_CODE (value.value) == STRING_CST
			 || TREE_CODE (value.value) == COMPOUND_LITERAL_EXPR);
      if (!keep_as_is)
	value = convert_lvalue_to_rvalue (init_loc, value, true, true);
    }
  process_init_element (init_loc, value, false, braced_init_obstack);
}
/* Parse a compound statement (possibly a function body) (C90 6.6.2,
   C99 6.8.2, C11 6.8.2).
   compound-statement:
     { block-item-list[opt] }
     { label-declarations block-item-list }
   block-item-list:
     block-item
     block-item-list block-item
   block-item:
     nested-declaration
     statement
   nested-declaration:
     declaration
   GNU extensions:
   compound-statement:
     { label-declarations block-item-list }
   nested-declaration:
     __extension__ nested-declaration
     nested-function-definition
   label-declarations:
     label-declaration
     label-declarations label-declaration
   label-declaration:
     __label__ identifier-list ;
   Allowing the mixing of declarations and code is new in C99.  The
   GNU syntax also permits (not shown above) labels at the end of
   compound statements, which yield an error.  We don't allow labels
   on declarations; this might seem like a natural extension, but
   there would be a conflict between attributes on the label and
   prefix attributes on the declaration.  ??? The syntax follows the
   old parser in requiring something after label declarations.
   Although they are erroneous if the labels declared aren't defined,
   is it useful for the syntax to be this way?
   OpenACC:
   block-item:
     openacc-directive
   openacc-directive:
     update-directive
   OpenMP:
   block-item:
     openmp-directive
   openmp-directive:
     barrier-directive
     flush-directive
     taskwait-directive
     taskyield-directive
     cancel-directive
     cancellation-point-directive  */
static tree
c_parser_compound_statement (c_parser *parser)
{
  location_t open_loc = c_parser_peek_token (parser)->location;
  bool have_brace
    = c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>");
  /* Enter a scope even without the brace, to avoid confusion if we
     have just prepared to enter a function body.  */
  tree scope = c_begin_compound_stmt (true);
  if (!have_brace)
    {
      c_end_compound_stmt (open_loc, scope, true);
      return error_mark_node;
    }
  c_parser_compound_statement_nostart (parser);
  return c_end_compound_stmt (open_loc, scope, true);
}
/* Parse a compound statement except for the opening brace.  This is
   used for parsing both compound statements and statement expressions
   (which follow different paths to handling the opening).
   Consumes everything up to and including the closing brace, emitting
   diagnostics for misplaced labels, declarations mixed with code in
   C90 mode, and stray %<else%> tokens.  */
static void
c_parser_compound_statement_nostart (c_parser *parser)
{
  /* Track whether the previous block-item was a statement (for the
     C90 mixed-declarations warning) or a label (labels may not end a
     compound statement and may not be followed by a declaration).  */
  bool last_stmt = false;
  bool last_label = false;
  bool save_valid_for_pragma = valid_location_for_stdc_pragma_p ();
  location_t label_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      add_debug_begin_stmt (c_parser_peek_token (parser)->location);
      c_parser_consume_token (parser);
      return;
    }
  mark_valid_location_for_stdc_pragma (true);
  if (c_parser_next_token_is_keyword (parser, RID_LABEL))
    {
      /* Read zero or more forward-declarations for labels that nested
	 functions can jump to.  */
      mark_valid_location_for_stdc_pragma (false);
      while (c_parser_next_token_is_keyword (parser, RID_LABEL))
	{
	  label_loc = c_parser_peek_token (parser)->location;
	  c_parser_consume_token (parser);
	  /* Any identifiers, including those declared as type names,
	     are OK here.  */
	  while (true)
	    {
	      tree label;
	      if (c_parser_next_token_is_not (parser, CPP_NAME))
		{
		  c_parser_error (parser, "expected identifier");
		  break;
		}
	      label
		= declare_label (c_parser_peek_token (parser)->value);
	      C_DECLARED_LABEL_FLAG (label) = 1;
	      add_stmt (build_stmt (label_loc, DECL_EXPR, label));
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is (parser, CPP_COMMA))
		c_parser_consume_token (parser);
	      else
		break;
	    }
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
	}
      pedwarn (label_loc, OPT_Wpedantic, "ISO C forbids label declarations");
    }
  /* We must now have at least one statement, label or declaration.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      mark_valid_location_for_stdc_pragma (save_valid_for_pragma);
      c_parser_error (parser, "expected declaration or statement");
      c_parser_consume_token (parser);
      return;
    }
  while (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE))
    {
      location_t loc = c_parser_peek_token (parser)->location;
      loc = expansion_point_location_if_in_system_header (loc);
      if (c_parser_next_token_is_keyword (parser, RID_CASE)
	  || c_parser_next_token_is_keyword (parser, RID_DEFAULT)
	  || (c_parser_next_token_is (parser, CPP_NAME)
	      && c_parser_peek_2nd_token (parser)->type == CPP_COLON))
	{
	  /* A (case, default or named) label.  */
	  if (c_parser_next_token_is_keyword (parser, RID_CASE))
	    label_loc = c_parser_peek_2nd_token (parser)->location;
	  else
	    label_loc = c_parser_peek_token (parser)->location;
	  last_label = true;
	  last_stmt = false;
	  mark_valid_location_for_stdc_pragma (false);
	  c_parser_label (parser);
	}
      else if (!last_label
	       && c_parser_next_tokens_start_declaration (parser))
	{
	  last_label = false;
	  mark_valid_location_for_stdc_pragma (false);
	  bool fallthru_attr_p = false;
	  c_parser_declaration_or_fndef (parser, true, true, true, true,
					 true, NULL, vNULL, NULL,
					 &fallthru_attr_p);
	  if (last_stmt && !fallthru_attr_p)
	    pedwarn_c90 (loc, OPT_Wdeclaration_after_statement,
			 "ISO C90 forbids mixed declarations and code");
	  last_stmt = fallthru_attr_p;
	}
      else if (!last_label
	       && c_parser_next_token_is_keyword (parser, RID_EXTENSION))
	{
	  /* __extension__ can start a declaration, but is also an
	     unary operator that can start an expression.  Consume all
	     but the last of a possible series of __extension__ to
	     determine which.  */
	  while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD
		 && (c_parser_peek_2nd_token (parser)->keyword
		     == RID_EXTENSION))
	    c_parser_consume_token (parser);
	  if (c_token_starts_declaration (c_parser_peek_2nd_token (parser)))
	    {
	      int ext;
	      ext = disable_extension_diagnostics ();
	      c_parser_consume_token (parser);
	      last_label = false;
	      mark_valid_location_for_stdc_pragma (false);
	      c_parser_declaration_or_fndef (parser, true, true, true, true,
					     true, NULL, vNULL);
	      /* Following the old parser, __extension__ does not
		 disable this diagnostic.  */
	      restore_extension_diagnostics (ext);
	      if (last_stmt)
		pedwarn_c90 (loc, OPT_Wdeclaration_after_statement,
			     "ISO C90 forbids mixed declarations and code");
	      last_stmt = false;
	    }
	  else
	    goto statement;
	}
      else if (c_parser_next_token_is (parser, CPP_PRAGMA))
	{
	  /* External pragmas, and some omp pragmas, are not associated
	     with regular c code, and so are not to be considered statements
	     syntactically.  This ensures that the user doesn't put them
	     places that would turn into syntax errors if the directive
	     were ignored.  */
	  if (c_parser_pragma (parser,
			       last_label ? pragma_stmt : pragma_compound,
			       NULL))
	    last_label = false, last_stmt = true;
	}
      else if (c_parser_next_token_is (parser, CPP_EOF))
	{
	  mark_valid_location_for_stdc_pragma (save_valid_for_pragma);
	  c_parser_error (parser, "expected declaration or statement");
	  return;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_ELSE))
	{
	  if (parser->in_if_block)
	    {
	      /* Let the enclosing if-statement handle the %<else%>.  */
	      mark_valid_location_for_stdc_pragma (save_valid_for_pragma);
	      error_at (loc, "expected %<}%> before %<else%>");
	      return;
	    }
	  else
	    {
	      error_at (loc, "%<else%> without a previous %<if%>");
	      c_parser_consume_token (parser);
	      continue;
	    }
	}
      else
	{
	statement:
	  last_label = false;
	  last_stmt = true;
	  mark_valid_location_for_stdc_pragma (false);
	  c_parser_statement_after_labels (parser, NULL);
	}
      parser->error = false;
    }
  if (last_label)
    error_at (label_loc, "label at end of compound statement");
  c_parser_consume_token (parser);
  /* Restore the value we started with.  */
  mark_valid_location_for_stdc_pragma (save_valid_for_pragma);
}
/* Parse all consecutive labels (case, default, or identifier-colon)
   preceding a statement.  */
static void
c_parser_all_labels (c_parser *parser)
{
  for (;;)
    {
      bool is_label
	= (c_parser_next_token_is_keyword (parser, RID_CASE)
	   || c_parser_next_token_is_keyword (parser, RID_DEFAULT)
	   || (c_parser_next_token_is (parser, CPP_NAME)
	       && c_parser_peek_2nd_token (parser)->type == CPP_COLON));
      if (!is_label)
	break;
      c_parser_label (parser);
    }
}
/* Parse a label (C90 6.6.1, C99 6.8.1, C11 6.8.1).
   label:
     identifier : attributes[opt]
     case constant-expression :
     default :
   GNU extensions:
   label:
     case constant-expression ... constant-expression :
   The use of attributes on labels is a GNU extension.  The syntax in
   GNU C accepts any expressions without commas, non-constant
   expressions being rejected later.
   Also handles a following %<__attribute__((fallthrough))%>; statement
   and diagnoses a declaration directly after a label.  */
static void
c_parser_label (c_parser *parser)
{
  location_t loc1 = c_parser_peek_token (parser)->location;
  tree label = NULL_TREE;
  /* Remember whether this case or a user-defined label is allowed to fall
     through to.  */
  bool fallthrough_p = c_parser_peek_token (parser)->flags & PREV_FALLTHROUGH;
  if (c_parser_next_token_is_keyword (parser, RID_CASE))
    {
      tree exp1, exp2;
      c_parser_consume_token (parser);
      exp1 = c_parser_expr_no_commas (parser, NULL).value;
      if (c_parser_next_token_is (parser, CPP_COLON))
	{
	  c_parser_consume_token (parser);
	  label = do_case (loc1, exp1, NULL_TREE);
	}
      else if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	{
	  /* GNU case range: "case LO ... HI:".  */
	  c_parser_consume_token (parser);
	  exp2 = c_parser_expr_no_commas (parser, NULL).value;
	  if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	    label = do_case (loc1, exp1, exp2);
	}
      else
	c_parser_error (parser, "expected %<:%> or %<...%>");
    }
  else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
    {
      c_parser_consume_token (parser);
      if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	label = do_case (loc1, NULL_TREE, NULL_TREE);
    }
  else
    {
      /* Ordinary named label; the caller guarantees the token shape.  */
      tree name = c_parser_peek_token (parser)->value;
      tree tlab;
      tree attrs;
      location_t loc2 = c_parser_peek_token (parser)->location;
      gcc_assert (c_parser_next_token_is (parser, CPP_NAME));
      c_parser_consume_token (parser);
      gcc_assert (c_parser_next_token_is (parser, CPP_COLON));
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      tlab = define_label (loc2, name);
      if (tlab)
	{
	  decl_attributes (&tlab, attrs, 0);
	  label = add_stmt (build_stmt (loc1, LABEL_EXPR, tlab));
	}
    }
  if (label)
    {
      if (TREE_CODE (label) == LABEL_EXPR)
	FALLTHROUGH_LABEL_P (LABEL_EXPR_LABEL (label)) = fallthrough_p;
      else
	FALLTHROUGH_LABEL_P (CASE_LABEL (label)) = fallthrough_p;
      /* Allow '__attribute__((fallthrough));'.  */
      if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	{
	  location_t loc = c_parser_peek_token (parser)->location;
	  tree attrs = c_parser_attributes (parser);
	  if (attribute_fallthrough_p (attrs))
	    {
	      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
		{
		  tree fn = build_call_expr_internal_loc (loc,
							  IFN_FALLTHROUGH,
							  void_type_node, 0);
		  add_stmt (fn);
		}
	      else
		warning_at (loc, OPT_Wattributes, "%<fallthrough%> attribute "
			    "not followed by %<;%>");
	    }
	  else if (attrs != NULL_TREE)
	    warning_at (loc, OPT_Wattributes, "only attribute %<fallthrough%>"
			" can be applied to a null statement");
	}
      if (c_parser_next_tokens_start_declaration (parser))
	{
	  /* Diagnose, then parse the declaration anyway for recovery.  */
	  error_at (c_parser_peek_token (parser)->location,
		    "a label can only be part of a statement and "
		    "a declaration is not a statement");
	  c_parser_declaration_or_fndef (parser, /*fndef_ok*/ false,
					 /*static_assert_ok*/ true,
					 /*empty_ok*/ true, /*nested*/ true,
					 /*start_attr_ok*/ true, NULL,
					 vNULL);
	}
    }
}
/* Parse a statement (C90 6.6, C99 6.8, C11 6.8).
statement:
labeled-statement
compound-statement
expression-statement
selection-statement
iteration-statement
jump-statement
labeled-statement:
label statement
expression-statement:
expression[opt] ;
selection-statement:
if-statement
switch-statement
iteration-statement:
while-statement
do-statement
for-statement
jump-statement:
goto identifier ;
continue ;
break ;
return expression[opt] ;
GNU extensions:
statement:
asm-statement
jump-statement:
goto * expression ;
expression-statement:
attributes ;
Objective-C:
statement:
objc-throw-statement
objc-try-catch-statement
objc-synchronized-statement
objc-throw-statement:
@throw expression ;
@throw ;
OpenACC:
statement:
openacc-construct
openacc-construct:
parallel-construct
kernels-construct
data-construct
loop-construct
parallel-construct:
parallel-directive structured-block
kernels-construct:
kernels-directive structured-block
data-construct:
data-directive structured-block
loop-construct:
loop-directive structured-block
OpenMP:
statement:
openmp-construct
openmp-construct:
parallel-construct
for-construct
simd-construct
for-simd-construct
sections-construct
single-construct
parallel-for-construct
parallel-for-simd-construct
parallel-sections-construct
master-construct
critical-construct
atomic-construct
ordered-construct
parallel-construct:
parallel-directive structured-block
for-construct:
for-directive iteration-statement
simd-construct:
simd-directive iteration-statements
for-simd-construct:
for-simd-directive iteration-statements
sections-construct:
sections-directive section-scope
single-construct:
single-directive structured-block
parallel-for-construct:
parallel-for-directive iteration-statement
parallel-for-simd-construct:
parallel-for-simd-directive iteration-statement
parallel-sections-construct:
parallel-sections-directive section-scope
master-construct:
master-directive structured-block
critical-construct:
critical-directive structured-block
atomic-construct:
atomic-directive expression-statement
ordered-construct:
ordered-directive structured-block
Transactional Memory:
statement:
transaction-statement
transaction-cancel-statement
IF_P is used to track whether there's a (possibly labeled) if statement
which is not enclosed in braces and has an else clause. This is used to
implement -Wparentheses. */
static void
c_parser_statement (c_parser *parser, bool *if_p, location_t *loc_after_labels)
{
  /* Consume any labels prefixed to the statement.  */
  c_parser_all_labels (parser);

  /* Report where the statement proper begins, if the caller asked for it.  */
  if (loc_after_labels != NULL)
    *loc_after_labels = c_parser_peek_token (parser)->location;

  c_parser_statement_after_labels (parser, if_p, NULL);
}
/* Parse a statement, other than a labeled statement. CHAIN is a vector
of if-else-if conditions.
IF_P is used to track whether there's a (possibly labeled) if statement
which is not enclosed in braces and has an else clause. This is used to
implement -Wparentheses. */
static void
c_parser_statement_after_labels (c_parser *parser, bool *if_p,
				 vec<tree> *chain)
{
  location_t loc = c_parser_peek_token (parser)->location;
  /* STMT stays NULL_TREE for statement kinds that do not produce a
     single expression tree (null statement, compound statement,
     selection/iteration statements, pragmas, ...).  */
  tree stmt = NULL_TREE;
  bool in_if_block = parser->in_if_block;
  parser->in_if_block = false;
  if (if_p != NULL)
    *if_p = false;

  /* Insert a debug begin-stmt marker; compound statements emit their
     own markers for their component statements.  */
  if (c_parser_peek_token (parser)->type != CPP_OPEN_BRACE)
    add_debug_begin_stmt (loc);

  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_OPEN_BRACE:
      add_stmt (c_parser_compound_statement (parser));
      break;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_IF:
	  c_parser_if_statement (parser, if_p, chain);
	  break;
	case RID_SWITCH:
	  c_parser_switch_statement (parser, if_p);
	  break;
	case RID_WHILE:
	  c_parser_while_statement (parser, false, 0, if_p);
	  break;
	case RID_DO:
	  c_parser_do_statement (parser, 0, false);
	  break;
	case RID_FOR:
	  c_parser_for_statement (parser, false, 0, if_p);
	  break;
	case RID_GOTO:
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      stmt = c_finish_goto_label (loc,
					  c_parser_peek_token (parser)->value);
	      c_parser_consume_token (parser);
	    }
	  else if (c_parser_next_token_is (parser, CPP_MULT))
	    {
	      /* GNU extension: computed goto, "goto *expr;".  */
	      struct c_expr val;

	      c_parser_consume_token (parser);
	      val = c_parser_expression (parser);
	      val = convert_lvalue_to_rvalue (loc, val, false, true);
	      stmt = c_finish_goto_ptr (loc, val.value);
	    }
	  else
	    c_parser_error (parser, "expected identifier or %<*%>");
	  goto expect_semicolon;
	case RID_CONTINUE:
	  c_parser_consume_token (parser);
	  stmt = c_finish_bc_stmt (loc, &c_cont_label, false);
	  goto expect_semicolon;
	case RID_BREAK:
	  c_parser_consume_token (parser);
	  stmt = c_finish_bc_stmt (loc, &c_break_label, true);
	  goto expect_semicolon;
	case RID_RETURN:
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      stmt = c_finish_return (loc, NULL_TREE, NULL_TREE);
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      location_t xloc = c_parser_peek_token (parser)->location;
	      struct c_expr expr = c_parser_expression_conv (parser);
	      mark_exp_read (expr.value);
	      stmt = c_finish_return (EXPR_LOC_OR_LOC (expr.value, xloc),
				      expr.value, expr.original_type);
	      goto expect_semicolon;
	    }
	  break;
	case RID_ASM:
	  stmt = c_parser_asm_statement (parser);
	  break;
	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  stmt = c_parser_transaction (parser,
				       c_parser_peek_token (parser)->keyword);
	  break;
	case RID_TRANSACTION_CANCEL:
	  stmt = c_parser_transaction_cancel (parser);
	  goto expect_semicolon;
	case RID_AT_THROW:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      /* "@throw;" rethrows the current exception.  */
	      stmt = objc_build_throw_stmt (loc, NULL_TREE);
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      struct c_expr expr = c_parser_expression (parser);
	      expr = convert_lvalue_to_rvalue (loc, expr, false, false);
	      expr.value = c_fully_fold (expr.value, false, NULL);
	      stmt = objc_build_throw_stmt (loc, expr.value);
	      goto expect_semicolon;
	    }
	  break;
	case RID_AT_TRY:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_try_catch_finally_statement (parser);
	  break;
	case RID_AT_SYNCHRONIZED:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_synchronized_statement (parser);
	  break;
	case RID_ATTRIBUTE:
	  {
	    /* Allow '__attribute__((fallthrough));'.  */
	    tree attrs = c_parser_attributes (parser);
	    if (attribute_fallthrough_p (attrs))
	      {
		if (c_parser_next_token_is (parser, CPP_SEMICOLON))
		  {
		    tree fn = build_call_expr_internal_loc (loc,
							    IFN_FALLTHROUGH,
							    void_type_node, 0);
		    add_stmt (fn);
		    /* Eat the ';'.  */
		    c_parser_consume_token (parser);
		  }
		else
		  warning_at (loc, OPT_Wattributes,
			      "%<fallthrough%> attribute not followed "
			      "by %<;%>");
	      }
	    else if (attrs != NULL_TREE)
	      warning_at (loc, OPT_Wattributes, "only attribute %<fallthrough%>"
			  " can be applied to a null statement");
	    break;
	  }
	default:
	  goto expr_stmt;
	}
      break;
    case CPP_SEMICOLON:
      c_parser_consume_token (parser);
      break;
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_SQUARE:
      /* Avoid infinite loop in error recovery:
	 c_parser_skip_until_found stops at a closing nesting
	 delimiter without consuming it, but here we need to consume
	 it to proceed further.  */
      c_parser_error (parser, "expected statement");
      c_parser_consume_token (parser);
      break;
    case CPP_PRAGMA:
      c_parser_pragma (parser, pragma_stmt, if_p);
      break;
    default:
    expr_stmt:
      stmt = c_finish_expr_stmt (loc, c_parser_expression_conv (parser).value);
    expect_semicolon:
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      break;
    }
  /* Two cases cannot and do not have line numbers associated: If stmt
     is degenerate, such as "2;", then stmt is an INTEGER_CST, which
     cannot hold line numbers.  But that's OK because the statement
     will either be changed to a MODIFY_EXPR during gimplification of
     the statement expr, or discarded.  If stmt was compound, but
     without new variables, we will have skipped the creation of a
     BIND and will have a bare STATEMENT_LIST.  But that's OK because
     (recursively) all of the component statements should already have
     line numbers assigned.  ??? Can we discard no-op statements
     earlier?  */
  /* STMT may be NULL_TREE here (see its initialization above): guard
     before reading its location, otherwise EXPR_LOCATION dereferences
     a null tree.  */
  if (stmt && EXPR_LOCATION (stmt) == UNKNOWN_LOCATION)
    protected_set_expr_location (stmt, loc);

  parser->in_if_block = in_if_block;
}
/* Parse the condition from an if, do, while or for statement.  */
static tree
c_parser_condition (c_parser *parser)
{
  /* Remember where the controlling expression starts, for diagnostics
     and the truth-value conversion.  */
  location_t cond_loc = c_parser_peek_token (parser)->location;

  /* Parse the expression, convert it to a truth value, and fold it.  */
  tree cond = c_parser_expression_conv (parser).value;
  cond = c_objc_common_truthvalue_conversion (cond_loc, cond);
  cond = c_fully_fold (cond, false, NULL);

  if (warn_sequence_point)
    verify_sequence_points (cond);

  return cond;
}
/* Parse a parenthesized condition from an if, do or while statement.
condition:
( expression )
*/
static tree
c_parser_paren_condition (c_parser *parser)
{
  matching_parens parens;

  /* Without the opening parenthesis there is nothing sensible to parse.  */
  if (!parens.require_open (parser))
    return error_mark_node;

  tree cond = c_parser_condition (parser);
  parens.skip_until_found_close (parser);
  return cond;
}
/* Parse a statement which is a block in C99.
IF_P is used to track whether there's a (possibly labeled) if statement
which is not enclosed in braces and has an else clause. This is used to
implement -Wparentheses. */
static tree
c_parser_c99_block_statement (c_parser *parser, bool *if_p,
			      location_t *loc_after_labels)
{
  /* In C99 the statement forms its own scope: open it, parse the
     statement inside it, then close it again.  */
  tree scope = c_begin_compound_stmt (flag_isoc99);
  location_t body_loc = c_parser_peek_token (parser)->location;
  c_parser_statement (parser, if_p, loc_after_labels);
  return c_end_compound_stmt (body_loc, scope, flag_isoc99);
}
/* Parse the body of an if statement. This is just parsing a
statement but (a) it is a block in C99, (b) we track whether the
body is an if statement for the sake of -Wparentheses warnings, (c)
we handle an empty body specially for the sake of -Wempty-body
warnings, and (d) we call c_parser_compound_statement directly
because c_parser_statement_after_labels resets
parser->in_if_block.
IF_P is used to track whether there's a (possibly labeled) if statement
which is not enclosed in braces and has an else clause. This is used to
implement -Wparentheses. */
static tree
c_parser_if_body (c_parser *parser, bool *if_p,
		  const token_indent_info &if_tinfo)
{
  tree body_block = c_begin_compound_stmt (flag_isoc99);
  location_t start_loc = c_parser_peek_token (parser)->location;
  location_t stmt_loc_after_labels = UNKNOWN_LOCATION;
  token_indent_info body_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));

  /* Any labels attached to the body are parsed first.  */
  c_parser_all_labels (parser);

  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      /* Empty body: represent it explicitly, and warn unless an else
	 clause follows.  */
      location_t semi_loc = c_parser_peek_token (parser)->location;
      add_stmt (build_empty_stmt (semi_loc));
      c_parser_consume_token (parser);
      if (!c_parser_next_token_is_keyword (parser, RID_ELSE))
	warning_at (semi_loc, OPT_Wempty_body,
		    "suggest braces around empty body in an %<if%> statement");
    }
  else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    add_stmt (c_parser_compound_statement (parser));
  else
    {
      /* Unbraced statement body: note its location for the
	 multistatement-macro warning below.  */
      stmt_loc_after_labels = c_parser_peek_token (parser)->location;
      c_parser_statement_after_labels (parser, if_p);
    }

  token_indent_info next_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  warn_for_misleading_indentation (if_tinfo, body_tinfo, next_tinfo);
  if (stmt_loc_after_labels != UNKNOWN_LOCATION
      && next_tinfo.type != CPP_SEMICOLON)
    warn_for_multistatement_macros (stmt_loc_after_labels, next_tinfo.location,
				    if_tinfo.location, RID_IF);

  return c_end_compound_stmt (start_loc, body_block, flag_isoc99);
}
/* Parse the else body of an if statement. This is just parsing a
statement but (a) it is a block in C99, (b) we handle an empty body
specially for the sake of -Wempty-body warnings. CHAIN is a vector
of if-else-if conditions. */
static tree
c_parser_else_body (c_parser *parser, const token_indent_info &else_tinfo,
		    vec<tree> *chain)
{
  location_t start_loc = c_parser_peek_token (parser)->location;
  tree body_block = c_begin_compound_stmt (flag_isoc99);
  token_indent_info body_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  location_t stmt_loc_after_labels = UNKNOWN_LOCATION;

  /* Any labels attached to the body are parsed first.  */
  c_parser_all_labels (parser);

  if (!c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	stmt_loc_after_labels = c_parser_peek_token (parser)->location;
      c_parser_statement_after_labels (parser, NULL, chain);
    }
  else
    {
      /* Bare ';' as the else body: warn and build an empty statement.  */
      location_t semi_loc = c_parser_peek_token (parser)->location;
      warning_at (semi_loc,
		  OPT_Wempty_body,
		  "suggest braces around empty body in an %<else%> statement");
      add_stmt (build_empty_stmt (semi_loc));
      c_parser_consume_token (parser);
    }

  token_indent_info next_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  warn_for_misleading_indentation (else_tinfo, body_tinfo, next_tinfo);
  if (stmt_loc_after_labels != UNKNOWN_LOCATION
      && next_tinfo.type != CPP_SEMICOLON)
    warn_for_multistatement_macros (stmt_loc_after_labels, next_tinfo.location,
				    else_tinfo.location, RID_ELSE);

  return c_end_compound_stmt (start_loc, body_block, flag_isoc99);
}
/* We might need to reclassify any previously-lexed identifier, e.g.
when we've left a for loop with an if-statement without else in the
body - we might have used a wrong scope for the token. See PR67784. */
static void
c_parser_maybe_reclassify_token (c_parser *parser)
{
  if (!c_parser_next_token_is (parser, CPP_NAME))
    return;

  c_token *tok = c_parser_peek_token (parser);
  if (tok->id_kind == C_ID_CLASSNAME)
    return;

  /* Re-run name lookup in the current scope and reclassify
     accordingly.  */
  tree decl = lookup_name (tok->value);
  tok->id_kind = C_ID_ID;
  if (decl)
    {
      if (TREE_CODE (decl) == TYPE_DECL)
	tok->id_kind = C_ID_TYPENAME;
      return;
    }

  if (c_dialect_objc ())
    {
      tree iface = objc_is_class_name (tok->value);
      /* Objective-C class names are in the same namespace as
	 variables and typedefs, and hence are shadowed by local
	 declarations.  */
      if (iface)
	{
	  tok->value = iface;
	  tok->id_kind = C_ID_CLASSNAME;
	}
    }
}
/* Parse an if statement (C90 6.6.4, C99 6.8.4, C11 6.8.4).
if-statement:
if ( expression ) statement
if ( expression ) statement else statement
CHAIN is a vector of if-else-if conditions.
IF_P is used to track whether there's a (possibly labeled) if statement
which is not enclosed in braces and has an else clause. This is used to
implement -Wparentheses. */
static void
c_parser_if_statement (c_parser *parser, bool *if_p, vec<tree> *chain)
{
  tree block;
  location_t loc;
  tree cond;
  /* Set by c_parser_if_body when the then-arm is itself an if statement
     with an else clause; used for the -Wdangling-else warning below.  */
  bool nested_if = false;
  tree first_body, second_body;
  bool in_if_block;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_IF));
  token_indent_info if_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  c_parser_consume_token (parser);
  block = c_begin_compound_stmt (flag_isoc99);
  loc = c_parser_peek_token (parser)->location;
  cond = c_parser_paren_condition (parser);
  /* Record that we are directly inside an if while parsing the
     then-arm, restoring the previous state afterwards.  */
  in_if_block = parser->in_if_block;
  parser->in_if_block = true;
  first_body = c_parser_if_body (parser, &nested_if, if_tinfo);
  parser->in_if_block = in_if_block;

  if (warn_duplicated_cond)
    warn_duplicated_cond_add_or_warn (EXPR_LOCATION (cond), cond, &chain);

  if (c_parser_next_token_is_keyword (parser, RID_ELSE))
    {
      token_indent_info else_tinfo
	= get_token_indent_info (c_parser_peek_token (parser));
      c_parser_consume_token (parser);
      if (warn_duplicated_cond)
	{
	  if (c_parser_next_token_is_keyword (parser, RID_IF)
	      && chain == NULL)
	    {
	      /* We've got "if (COND) else if (COND2)".  Start the
		 condition chain and add COND as the first element.  */
	      chain = new vec<tree> ();
	      /* Only chain conditions worth comparing: constants and
		 side-effecting expressions are excluded.  */
	      if (!CONSTANT_CLASS_P (cond) && !TREE_SIDE_EFFECTS (cond))
		chain->safe_push (cond);
	    }
	  else if (!c_parser_next_token_is_keyword (parser, RID_IF))
	    {
	      /* This is if-else without subsequent if.  Zap the condition
		 chain; we would have already warned at this point.  */
	      delete chain;
	      chain = NULL;
	    }
	}
      second_body = c_parser_else_body (parser, else_tinfo, chain);
      /* Set IF_P to true to indicate that this if statement has an
	 else clause.  This may trigger the Wparentheses warning
	 below when we get back up to the parent if statement.  */
      if (if_p != NULL)
	*if_p = true;
    }
  else
    {
      second_body = NULL_TREE;

      /* Diagnose an ambiguous else if if-then-else is nested inside
	 if-then.  */
      if (nested_if)
	warning_at (loc, OPT_Wdangling_else,
		    "suggest explicit braces to avoid ambiguous %<else%>");

      if (warn_duplicated_cond)
	{
	  /* This if statement does not have an else clause.  We don't
	     need the condition chain anymore.  */
	  delete chain;
	  chain = NULL;
	}
    }
  c_finish_if_stmt (loc, cond, first_body, second_body);
  add_stmt (c_end_compound_stmt (loc, block, flag_isoc99));

  c_parser_maybe_reclassify_token (parser);
}
/* Parse a switch statement (C90 6.6.4, C99 6.8.4, C11 6.8.4).
switch-statement:
switch (expression) statement
*/
static void
c_parser_switch_statement (c_parser *parser, bool *if_p)
{
  struct c_expr ce;
  tree block, expr, body, save_break;
  location_t switch_loc = c_parser_peek_token (parser)->location;
  location_t switch_cond_loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_SWITCH));
  c_parser_consume_token (parser);
  block = c_begin_compound_stmt (flag_isoc99);
  /* Whether the controlling expression is written with an explicit
     cast, e.g. "switch ((int) x)"; passed on to c_start_case.  */
  bool explicit_cast_p = false;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      switch_cond_loc = c_parser_peek_token (parser)->location;
      if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
	  && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
	explicit_cast_p = true;
      ce = c_parser_expression (parser);
      ce = convert_lvalue_to_rvalue (switch_cond_loc, ce, true, false);
      expr = ce.value;
      /* ??? expr has no valid location?  */
      parens.skip_until_found_close (parser);
    }
  else
    {
      /* No usable controlling expression; carry on with error marks so
	 the body can still be parsed.  */
      switch_cond_loc = UNKNOWN_LOCATION;
      expr = error_mark_node;
      ce.original_type = error_mark_node;
    }
  c_start_case (switch_loc, switch_cond_loc, expr, explicit_cast_p);
  /* Save and clear the shared break label so a "break" inside the body
     binds to this switch; restored below.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  location_t loc_after_labels;
  bool open_brace_p = c_parser_peek_token (parser)->type == CPP_OPEN_BRACE;
  body = c_parser_c99_block_statement (parser, if_p, &loc_after_labels);
  location_t next_loc = c_parser_peek_token (parser)->location;
  if (!open_brace_p && c_parser_peek_token (parser)->type != CPP_SEMICOLON)
    warn_for_multistatement_macros (loc_after_labels, next_loc, switch_loc,
				    RID_SWITCH);
  if (c_break_label)
    {
      /* A "break" was seen: append the label it jumps to at the end of
	 the switch body.  */
      location_t here = c_parser_peek_token (parser)->location;
      tree t = build1 (LABEL_EXPR, void_type_node, c_break_label);
      SET_EXPR_LOCATION (t, here);
      SWITCH_BREAK_LABEL_P (c_break_label) = 1;
      append_to_statement_list_force (t, &body);
    }
  c_finish_case (body, ce.original_type);
  c_break_label = save_break;
  add_stmt (c_end_compound_stmt (switch_loc, block, flag_isoc99));
  c_parser_maybe_reclassify_token (parser);
}
/* Parse a while statement (C90 6.6.5, C99 6.8.5, C11 6.8.5).
while-statement:
while (expression) statement
IF_P is used to track whether there's a (possibly labeled) if statement
which is not enclosed in braces and has an else clause. This is used to
implement -Wparentheses. */
static void
c_parser_while_statement (c_parser *parser, bool ivdep, unsigned short unroll,
			  bool *if_p)
{
  tree block, cond, body, save_break, save_cont;
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_WHILE));
  token_indent_info while_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  c_parser_consume_token (parser);
  block = c_begin_compound_stmt (flag_isoc99);
  loc = c_parser_peek_token (parser)->location;
  cond = c_parser_paren_condition (parser);
  /* Wrap the condition in ANNOTATE_EXPR nodes to carry the "GCC ivdep"
     and "GCC unroll" pragma information down to the loop optimizers.  */
  if (ivdep && cond != error_mark_node)
    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node,
				  annot_expr_ivdep_kind),
		   integer_zero_node);
  if (unroll && cond != error_mark_node)
    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node,
				  annot_expr_unroll_kind),
		   build_int_cst (integer_type_node, unroll));
  /* Give the body fresh break/continue labels so break/continue inside
     it bind to this loop; the enclosing loop's labels are restored at
     the end.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;

  token_indent_info body_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));

  location_t loc_after_labels;
  bool open_brace = c_parser_next_token_is (parser, CPP_OPEN_BRACE);
  body = c_parser_c99_block_statement (parser, if_p, &loc_after_labels);
  c_finish_loop (loc, cond, NULL, body, c_break_label, c_cont_label, true);
  add_stmt (c_end_compound_stmt (loc, block, flag_isoc99));
  c_parser_maybe_reclassify_token (parser);

  token_indent_info next_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  warn_for_misleading_indentation (while_tinfo, body_tinfo, next_tinfo);

  if (next_tinfo.type != CPP_SEMICOLON && !open_brace)
    warn_for_multistatement_macros (loc_after_labels, next_tinfo.location,
				    while_tinfo.location, RID_WHILE);

  c_break_label = save_break;
  c_cont_label = save_cont;
}
/* Parse a do statement (C90 6.6.5, C99 6.8.5, C11 6.8.5).
do-statement:
do statement while ( expression ) ;
*/
static void
c_parser_do_statement (c_parser *parser, bool ivdep, unsigned short unroll)
{
  tree block, cond, body, save_break, save_cont, new_break, new_cont;
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_DO));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    warning_at (c_parser_peek_token (parser)->location,
		OPT_Wempty_body,
		"suggest braces around empty body in %<do%> statement");
  block = c_begin_compound_stmt (flag_isoc99);
  loc = c_parser_peek_token (parser)->location;
  /* Give the body fresh break/continue labels.  They are captured into
     NEW_BREAK/NEW_CONT and the enclosing loop's labels are restored
     before the controlling expression is parsed, since that
     expression is outside the loop body.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser, NULL);
  c_parser_require_keyword (parser, RID_WHILE, "expected %<while%>");
  new_break = c_break_label;
  c_break_label = save_break;
  new_cont = c_cont_label;
  c_cont_label = save_cont;
  cond = c_parser_paren_condition (parser);
  /* Wrap the condition in ANNOTATE_EXPR nodes to carry the "GCC ivdep"
     and "GCC unroll" pragma information down to the loop optimizers.  */
  if (ivdep && cond != error_mark_node)
    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node,
				  annot_expr_ivdep_kind),
		   integer_zero_node);
  if (unroll && cond != error_mark_node)
    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node,
				  annot_expr_unroll_kind),
		   build_int_cst (integer_type_node, unroll));
  if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);
  c_finish_loop (loc, cond, NULL, body, new_break, new_cont, false);
  add_stmt (c_end_compound_stmt (loc, block, flag_isoc99));
}
/* Parse a for statement (C90 6.6.5, C99 6.8.5, C11 6.8.5).
for-statement:
for ( expression[opt] ; expression[opt] ; expression[opt] ) statement
for ( nested-declaration expression[opt] ; expression[opt] ) statement
The form with a declaration is new in C99.
??? In accordance with the old parser, the declaration may be a
nested function, which is then rejected in check_for_loop_decls,
but does it make any sense for this to be included in the grammar?
Note in particular that the nested function does not include a
trailing ';', whereas the "declaration" production includes one.
Also, can we reject bad declarations earlier and cheaper than
check_for_loop_decls?
In Objective-C, there are two additional variants:
foreach-statement:
for ( expression in expression ) statement
for ( declaration in expression ) statement
This is inconsistent with C, because the second variant is allowed
even if c99 is not enabled.
The rest of the comment documents these Objective-C foreach-statement.
Here is the canonical example of the first variant:
for (object in array) { do something with object }
we call the first expression ("object") the "object_expression" and
the second expression ("array") the "collection_expression".
object_expression must be an lvalue of type "id" (a generic Objective-C
object) because the loop works by assigning to object_expression the
various objects from the collection_expression. collection_expression
must evaluate to something of type "id" which responds to the method
countByEnumeratingWithState:objects:count:.
The canonical example of the second variant is:
for (id object in array) { do something with object }
which is completely equivalent to
{
id object;
for (object in array) { do something with object }
}
Note that initializing 'object' in some way (eg, "for ((object =
xxx) in array) { do something with object }") is possibly
technically valid, but completely pointless as 'object' will be
assigned to something else as soon as the loop starts. We should
most likely reject it (TODO).
The beginning of the Objective-C foreach-statement looks exactly
like the beginning of the for-statement, and we can tell it is a
foreach-statement only because the initial declaration or
expression is terminated by 'in' instead of ';'.
IF_P is used to track whether there's a (possibly labeled) if statement
which is not enclosed in braces and has an else clause. This is used to
implement -Wparentheses. */
static void
c_parser_for_statement (c_parser *parser, bool ivdep, unsigned short unroll,
			bool *if_p)
{
  tree block, cond, incr, save_break, save_cont, body;
  /* The following are only used when parsing an ObjC foreach statement.  */
  tree object_expression;
  /* Silence the bogus uninitialized warning.  */
  tree collection_expression = NULL;
  location_t loc = c_parser_peek_token (parser)->location;
  location_t for_loc = c_parser_peek_token (parser)->location;
  bool is_foreach_statement = false;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_FOR));
  token_indent_info for_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  c_parser_consume_token (parser);
  /* Open a compound statement in Objective-C as well, just in case this is
     a foreach expression.  */
  block = c_begin_compound_stmt (flag_isoc99 || c_dialect_objc ());
  cond = error_mark_node;
  incr = error_mark_node;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      /* Parse the initialization declaration or expression.  */
      object_expression = error_mark_node;
      parser->objc_could_be_foreach_context = c_dialect_objc ();
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  /* Empty initialization clause: "for (;...".  */
	  parser->objc_could_be_foreach_context = false;
	  c_parser_consume_token (parser);
	  c_finish_expr_stmt (loc, NULL_TREE);
	}
      else if (c_parser_next_tokens_start_declaration (parser))
	{
	  c_parser_declaration_or_fndef (parser, true, true, true, true, true,
					 &object_expression, vNULL);
	  parser->objc_could_be_foreach_context = false;

	  if (c_parser_next_token_is_keyword (parser, RID_IN))
	    {
	      /* "for (decl in expr)": Objective-C fast enumeration.  */
	      c_parser_consume_token (parser);
	      is_foreach_statement = true;
	      if (check_for_loop_decls (for_loc, true) == NULL_TREE)
		c_parser_error (parser, "multiple iterating variables in fast enumeration");
	    }
	  else
	    check_for_loop_decls (for_loc, flag_isoc99);
	}
      else if (c_parser_next_token_is_keyword (parser, RID_EXTENSION))
	{
	  /* __extension__ can start a declaration, but is also an
	     unary operator that can start an expression.  Consume all
	     but the last of a possible series of __extension__ to
	     determine which.  */
	  while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD
		 && (c_parser_peek_2nd_token (parser)->keyword
		     == RID_EXTENSION))
	    c_parser_consume_token (parser);
	  if (c_token_starts_declaration (c_parser_peek_2nd_token (parser)))
	    {
	      int ext;
	      ext = disable_extension_diagnostics ();
	      c_parser_consume_token (parser);
	      c_parser_declaration_or_fndef (parser, true, true, true, true,
					     true, &object_expression, vNULL);
	      parser->objc_could_be_foreach_context = false;

	      restore_extension_diagnostics (ext);
	      if (c_parser_next_token_is_keyword (parser, RID_IN))
		{
		  c_parser_consume_token (parser);
		  is_foreach_statement = true;
		  if (check_for_loop_decls (for_loc, true) == NULL_TREE)
		    c_parser_error (parser, "multiple iterating variables in fast enumeration");
		}
	      else
		check_for_loop_decls (for_loc, flag_isoc99);
	    }
	  else
	    goto init_expr;
	}
      else
	{
	init_expr:
	  {
	    struct c_expr ce;
	    tree init_expression;
	    ce = c_parser_expression (parser);
	    init_expression = ce.value;
	    parser->objc_could_be_foreach_context = false;
	    if (c_parser_next_token_is_keyword (parser, RID_IN))
	      {
		/* "for (expr in expr)": the iterating expression must
		   be an lvalue, since the loop assigns to it.  */
		c_parser_consume_token (parser);
		is_foreach_statement = true;
		if (! lvalue_p (init_expression))
		  c_parser_error (parser, "invalid iterating variable in fast enumeration");
		object_expression = c_fully_fold (init_expression, false, NULL);
	      }
	    else
	      {
		ce = convert_lvalue_to_rvalue (loc, ce, true, false);
		init_expression = ce.value;
		c_finish_expr_stmt (loc, init_expression);
		c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
	      }
	  }
	}
      /* Parse the loop condition.  In the case of a foreach
	 statement, there is no loop condition.  */
      gcc_assert (!parser->objc_could_be_foreach_context);
      if (!is_foreach_statement)
	{
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      /* An omitted condition conflicts with the ivdep/unroll
		 pragmas, which annotate the condition.  */
	      if (ivdep)
		{
		  c_parser_error (parser, "missing loop condition in loop with "
				  "%<GCC ivdep%> pragma");
		  cond = error_mark_node;
		}
	      else if (unroll)
		{
		  c_parser_error (parser, "missing loop condition in loop with "
				  "%<GCC unroll%> pragma");
		  cond = error_mark_node;
		}
	      else
		{
		  c_parser_consume_token (parser);
		  cond = NULL_TREE;
		}
	    }
	  else
	    {
	      cond = c_parser_condition (parser);
	      c_parser_skip_until_found (parser, CPP_SEMICOLON,
					 "expected %<;%>");
	    }
	  /* Wrap the condition in ANNOTATE_EXPR nodes to carry the
	     "GCC ivdep" and "GCC unroll" pragma information to the
	     loop optimizers.  */
	  if (ivdep && cond != error_mark_node)
	    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
			   build_int_cst (integer_type_node,
					  annot_expr_ivdep_kind),
			   integer_zero_node);
	  if (unroll && cond != error_mark_node)
	    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
			   build_int_cst (integer_type_node,
					  annot_expr_unroll_kind),
			   build_int_cst (integer_type_node, unroll));
	}
      /* Parse the increment expression (the third expression in a
	 for-statement).  In the case of a foreach-statement, this is
	 the expression that follows the 'in'.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  if (is_foreach_statement)
	    {
	      c_parser_error (parser, "missing collection in fast enumeration");
	      collection_expression = error_mark_node;
	    }
	  else
	    incr = c_process_expr_stmt (loc, NULL_TREE);
	}
      else
	{
	  if (is_foreach_statement)
	    collection_expression = c_fully_fold (c_parser_expression (parser).value,
						  false, NULL);
	  else
	    {
	      struct c_expr ce = c_parser_expression (parser);
	      ce = convert_lvalue_to_rvalue (loc, ce, true, false);
	      incr = c_process_expr_stmt (loc, ce.value);
	    }
	}
      parens.skip_until_found_close (parser);
    }
  /* Give the body fresh break/continue labels; the enclosing loop's
     labels are restored at the end.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  token_indent_info body_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));

  location_t loc_after_labels;
  bool open_brace = c_parser_next_token_is (parser, CPP_OPEN_BRACE);
  body = c_parser_c99_block_statement (parser, if_p, &loc_after_labels);

  if (is_foreach_statement)
    objc_finish_foreach_loop (loc, object_expression, collection_expression, body, c_break_label, c_cont_label);
  else
    c_finish_loop (loc, cond, incr, body, c_break_label, c_cont_label, true);
  add_stmt (c_end_compound_stmt (loc, block, flag_isoc99 || c_dialect_objc ()));
  c_parser_maybe_reclassify_token (parser);

  token_indent_info next_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  warn_for_misleading_indentation (for_tinfo, body_tinfo, next_tinfo);

  if (next_tinfo.type != CPP_SEMICOLON && !open_brace)
    warn_for_multistatement_macros (loc_after_labels, next_tinfo.location,
				    for_tinfo.location, RID_FOR);

  c_break_label = save_break;
  c_cont_label = save_cont;
}
/* Parse an asm statement, a GNU extension. This is a full-blown asm
statement with inputs, outputs, clobbers, and volatile tag
allowed.
asm-statement:
asm type-qualifier[opt] ( asm-argument ) ;
asm type-qualifier[opt] goto ( asm-goto-argument ) ;
asm-argument:
asm-string-literal
asm-string-literal : asm-operands[opt]
asm-string-literal : asm-operands[opt] : asm-operands[opt]
asm-string-literal : asm-operands[opt] : asm-operands[opt] : asm-clobbers[opt]
asm-goto-argument:
asm-string-literal : : asm-operands[opt] : asm-clobbers[opt] \
: asm-goto-operands
Qualifiers other than volatile are accepted in the syntax but
warned for. */
static tree
c_parser_asm_statement (c_parser *parser)
{
  tree quals, str, outputs, inputs, clobbers, labels, ret;
  bool simple, is_goto;
  location_t asm_loc = c_parser_peek_token (parser)->location;
  int section, nsections;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM));
  c_parser_consume_token (parser);
  /* "volatile" is the only qualifier that is honored; "const" and
     "restrict" are accepted syntactically but warned about and
     discarded.  */
  if (c_parser_next_token_is_keyword (parser, RID_VOLATILE))
    {
      quals = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else if (c_parser_next_token_is_keyword (parser, RID_CONST)
	   || c_parser_next_token_is_keyword (parser, RID_RESTRICT))
    {
      warning_at (c_parser_peek_token (parser)->location,
		  0,
		  "%E qualifier ignored on asm",
		  c_parser_peek_token (parser)->value);
      quals = NULL_TREE;
      c_parser_consume_token (parser);
    }
  else
    quals = NULL_TREE;
  /* An optional "goto" keyword selects the asm-goto form, which has an
     extra (fourth) operand section for labels.  */
  is_goto = false;
  if (c_parser_next_token_is_keyword (parser, RID_GOTO))
    {
      c_parser_consume_token (parser);
      is_goto = true;
    }
  /* ??? Follow the C++ parser rather than using the
     lex_untranslated_string kludge.  */
  parser->lex_untranslated_string = true;
  ret = NULL;
  matching_parens parens;
  if (!parens.require_open (parser))
    goto error;
  str = c_parser_asm_string_literal (parser);
  if (str == NULL_TREE)
    goto error_close_paren;
  /* A bare string with no colon sections is a "simple" asm; asm goto
     always requires the sections, so do not take the shortcut then.  */
  simple = true;
  outputs = NULL_TREE;
  inputs = NULL_TREE;
  clobbers = NULL_TREE;
  labels = NULL_TREE;
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN) && !is_goto)
    goto done_asm;
  /* Parse each colon-delimited section of operands.  */
  nsections = 3 + is_goto;
  for (section = 0; section < nsections; ++section)
    {
      if (!c_parser_require (parser, CPP_COLON,
			     is_goto
			     ? G_("expected %<:%>")
			     : G_("expected %<:%> or %<)%>"),
			     UNKNOWN_LOCATION, is_goto))
	goto error_close_paren;
      /* Once past any colon, we're no longer a simple asm.  */
      simple = false;
      /* Section 3 (goto labels) is mandatory for asm goto even when the
	 next token is a colon or close paren.  */
      if ((!c_parser_next_token_is (parser, CPP_COLON)
	   && !c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	  || section == 3)
	switch (section)
	  {
	  case 0:
	    /* For asm goto, we don't allow output operands, but reserve
	       the slot for a future extension that does allow them.  */
	    if (!is_goto)
	      outputs = c_parser_asm_operands (parser);
	    break;
	  case 1:
	    inputs = c_parser_asm_operands (parser);
	    break;
	  case 2:
	    clobbers = c_parser_asm_clobbers (parser);
	    break;
	  case 3:
	    labels = c_parser_asm_goto_operands (parser);
	    break;
	  default:
	    gcc_unreachable ();
	  }
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN) && !is_goto)
	goto done_asm;
    }
 done_asm:
  if (!parens.require_close (parser))
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      goto error;
    }
  if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);
  ret = build_asm_stmt (quals, build_asm_expr (asm_loc, str, outputs, inputs,
					       clobbers, labels, simple));
  /* The success path deliberately falls through to ERROR so that the
     lexer mode is restored on every exit.  */
 error:
  parser->lex_untranslated_string = false;
  return ret;
 error_close_paren:
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
  goto error;
}
/* Parse asm operands, a GNU extension.
asm-operands:
asm-operand
asm-operands , asm-operand
asm-operand:
asm-string-literal ( expression )
[ identifier ] asm-string-literal ( expression )
*/
static tree
c_parser_asm_operands (c_parser *parser)
{
  tree list = NULL_TREE;
  while (true)
    {
      tree name, str;
      struct c_expr expr;
      /* Optional symbolic operand name: "[ identifier ]".  */
      if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
	{
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      tree id = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	      name = build_string (IDENTIFIER_LENGTH (id),
				   IDENTIFIER_POINTER (id));
	    }
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL);
	      return NULL_TREE;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	}
      else
	name = NULL_TREE;
      str = c_parser_asm_string_literal (parser);
      if (str == NULL_TREE)
	return NULL_TREE;
      /* The parenthesized operand is an ordinary C expression, so leave
	 the untranslated-string mode used for asm string literals while
	 parsing it, and restore the mode afterwards.  */
      parser->lex_untranslated_string = false;
      matching_parens parens;
      if (!parens.require_open (parser))
	{
	  parser->lex_untranslated_string = true;
	  return NULL_TREE;
	}
      expr = c_parser_expression (parser);
      mark_exp_read (expr.value);
      parser->lex_untranslated_string = true;
      if (!parens.require_close (parser))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return NULL_TREE;
	}
      /* Each operand becomes a TREE_LIST node whose purpose is the pair
	 (name, constraint string) and whose value is the expression.  */
      list = chainon (list, build_tree_list (build_tree_list (name, str),
					     expr.value));
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  return list;
}
/* Parse asm clobbers, a GNU extension.
asm-clobbers:
asm-string-literal
asm-clobbers , asm-string-literal
*/
static tree
c_parser_asm_clobbers (c_parser *parser)
{
  tree clobber_list = NULL_TREE;
  for (;;)
    {
      tree clobber = c_parser_asm_string_literal (parser);
      /* Any malformed string literal aborts the whole list.  */
      if (clobber == NULL_TREE)
	return NULL_TREE;
      /* Prepend, so the list ends up in reverse source order.  */
      clobber_list = tree_cons (NULL_TREE, clobber, clobber_list);
      if (!c_parser_next_token_is (parser, CPP_COMMA))
	return clobber_list;
      c_parser_consume_token (parser);
    }
}
/* Parse asm goto labels, a GNU extension.
asm-goto-operands:
identifier
asm-goto-operands , identifier
*/
static tree
c_parser_asm_goto_operands (c_parser *parser)
{
  tree labels = NULL_TREE;
  for (;;)
    {
      /* Every operand must be a plain identifier naming a label.  */
      if (!c_parser_next_token_is (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  return NULL_TREE;
	}
      c_token *tok = c_parser_peek_token (parser);
      tree id = tok->value;
      tree label = lookup_label_for_goto (tok->location, id);
      c_parser_consume_token (parser);
      TREE_USED (label) = 1;
      /* Record the spelling as a STRING_CST alongside the LABEL_DECL.  */
      tree name = build_string (IDENTIFIER_LENGTH (id),
				IDENTIFIER_POINTER (id));
      labels = tree_cons (name, label, labels);
      if (!c_parser_next_token_is (parser, CPP_COMMA))
	return nreverse (labels);
      c_parser_consume_token (parser);
    }
}
/* Parse an expression other than a compound expression; that is, an
assignment expression (C90 6.3.16, C99 6.5.16, C11 6.5.16). If
AFTER is not NULL then it is an Objective-C message expression which
is the primary-expression starting the expression as an initializer.
assignment-expression:
conditional-expression
unary-expression assignment-operator assignment-expression
assignment-operator: one of
= *= /= %= += -= <<= >>= &= ^= |=
In GNU C we accept any conditional expression on the LHS and
diagnose the invalid lvalue rather than producing a syntax
error. */
static struct c_expr
c_parser_expr_no_commas (c_parser *parser, struct c_expr *after,
			 tree omp_atomic_lhs)
{
  gcc_assert (!after || c_dialect_objc ());
  struct c_expr lhs
    = c_parser_conditional_expression (parser, after, omp_atomic_lhs);
  location_t assign_loc = c_parser_peek_token (parser)->location;
  /* Map the assignment token to the tree code of the underlying
     arithmetic operation; plain '=' maps to NOP_EXPR.  */
  enum tree_code opcode;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_EQ:
      opcode = NOP_EXPR;
      break;
    case CPP_MULT_EQ:
      opcode = MULT_EXPR;
      break;
    case CPP_DIV_EQ:
      opcode = TRUNC_DIV_EXPR;
      break;
    case CPP_MOD_EQ:
      opcode = TRUNC_MOD_EXPR;
      break;
    case CPP_PLUS_EQ:
      opcode = PLUS_EXPR;
      break;
    case CPP_MINUS_EQ:
      opcode = MINUS_EXPR;
      break;
    case CPP_LSHIFT_EQ:
      opcode = LSHIFT_EXPR;
      break;
    case CPP_RSHIFT_EQ:
      opcode = RSHIFT_EXPR;
      break;
    case CPP_AND_EQ:
      opcode = BIT_AND_EXPR;
      break;
    case CPP_XOR_EQ:
      opcode = BIT_XOR_EXPR;
      break;
    case CPP_OR_EQ:
      opcode = BIT_IOR_EXPR;
      break;
    default:
      /* Not an assignment operator: the conditional expression is the
	 whole assignment-expression.  */
      return lhs;
    }
  c_parser_consume_token (parser);
  /* Assignment is right-associative, hence the recursive call for the
     right-hand side.  */
  location_t rhs_loc = c_parser_peek_token (parser)->location;
  struct c_expr rhs = c_parser_expr_no_commas (parser, NULL);
  rhs = convert_lvalue_to_rvalue (rhs_loc, rhs, true, true);
  struct c_expr result;
  result.value = build_modify_expr (assign_loc, lhs.value, lhs.original_type,
				    opcode, rhs_loc, rhs.value,
				    rhs.original_type);
  set_c_expr_source_range (&result, lhs.get_start (), rhs.get_finish ());
  if (opcode == NOP_EXPR)
    result.original_code = MODIFY_EXPR;
  else
    {
      /* Suppress follow-on warnings about compound-assignment results.  */
      TREE_NO_WARNING (result.value) = 1;
      result.original_code = ERROR_MARK;
    }
  result.original_type = NULL;
  return result;
}
/* Parse a conditional expression (C90 6.3.15, C99 6.5.15, C11 6.5.15). If
AFTER is not NULL then it is an Objective-C message expression which is
the primary-expression starting the expression as an initializer.
conditional-expression:
logical-OR-expression
logical-OR-expression ? expression : conditional-expression
GNU extensions:
conditional-expression:
logical-OR-expression ? : conditional-expression
*/
static struct c_expr
c_parser_conditional_expression (c_parser *parser, struct c_expr *after,
				 tree omp_atomic_lhs)
{
  struct c_expr cond, exp1, exp2, ret;
  location_t start, cond_loc, colon_loc;
  gcc_assert (!after || c_dialect_objc ());
  cond = c_parser_binary_expression (parser, after, omp_atomic_lhs);
  /* No '?' means this is just the binary expression.  */
  if (c_parser_next_token_is_not (parser, CPP_QUERY))
    return cond;
  if (cond.value != error_mark_node)
    start = cond.get_start ();
  else
    start = UNKNOWN_LOCATION;
  cond_loc = c_parser_peek_token (parser)->location;
  cond = convert_lvalue_to_rvalue (cond_loc, cond, true, true);
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      /* GNU extension "cond ? : exp2": the omitted middle operand is
	 the condition itself, evaluated only once via save_expr.  */
      tree eptype = NULL_TREE;
      location_t middle_loc = c_parser_peek_token (parser)->location;
      pedwarn (middle_loc, OPT_Wpedantic,
	       "ISO C forbids omitting the middle term of a ?: expression");
      if (TREE_CODE (cond.value) == EXCESS_PRECISION_EXPR)
	{
	  eptype = TREE_TYPE (cond.value);
	  cond.value = TREE_OPERAND (cond.value, 0);
	}
      tree e = cond.value;
      while (TREE_CODE (e) == COMPOUND_EXPR)
	e = TREE_OPERAND (e, 1);
      warn_for_omitted_condop (middle_loc, e);
      /* Make sure first operand is calculated only once.  */
      exp1.value = save_expr (default_conversion (cond.value));
      if (eptype)
	exp1.value = build1 (EXCESS_PRECISION_EXPR, eptype, exp1.value);
      exp1.original_type = NULL;
      exp1.src_range = cond.src_range;
      cond.value = c_objc_common_truthvalue_conversion (cond_loc, exp1.value);
      /* If the condition folded to a constant true, warnings in the
	 never-evaluated third operand are suppressed below.  */
      c_inhibit_evaluation_warnings += cond.value == truthvalue_true_node;
    }
  else
    {
      cond.value
	= c_objc_common_truthvalue_conversion
	(cond_loc, default_conversion (cond.value));
      /* Suppress warnings while parsing whichever arm is statically
	 known not to be evaluated; the adjustments below keep the
	 counter balanced across the two arms.  */
      c_inhibit_evaluation_warnings += cond.value == truthvalue_false_node;
      exp1 = c_parser_expression_conv (parser);
      mark_exp_read (exp1.value);
      c_inhibit_evaluation_warnings +=
	((cond.value == truthvalue_true_node)
	 - (cond.value == truthvalue_false_node));
    }
  colon_loc = c_parser_peek_token (parser)->location;
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
    {
      /* Undo the suppression added above before bailing out.  */
      c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node;
      ret.set_error ();
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      return ret;
    }
  {
    location_t exp2_loc = c_parser_peek_token (parser)->location;
    exp2 = c_parser_conditional_expression (parser, NULL, NULL_TREE);
    exp2 = convert_lvalue_to_rvalue (exp2_loc, exp2, true, true);
  }
  c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node;
  location_t loc1 = make_location (exp1.get_start (), exp1.src_range);
  location_t loc2 = make_location (exp2.get_start (), exp2.src_range);
  ret.value = build_conditional_expr (colon_loc, cond.value,
				      cond.original_code == C_MAYBE_CONST_EXPR,
				      exp1.value, exp1.original_type, loc1,
				      exp2.value, exp2.original_type, loc2);
  ret.original_code = ERROR_MARK;
  if (exp1.value == error_mark_node || exp2.value == error_mark_node)
    ret.original_type = NULL;
  else
    {
      tree t1, t2;
      /* If both sides are enum type, the default conversion will have
	 made the type of the result be an integer type.  We want to
	 remember the enum types we started with.  */
      t1 = exp1.original_type ? exp1.original_type : TREE_TYPE (exp1.value);
      t2 = exp2.original_type ? exp2.original_type : TREE_TYPE (exp2.value);
      ret.original_type = ((t1 != error_mark_node
			    && t2 != error_mark_node
			    && (TYPE_MAIN_VARIANT (t1)
				== TYPE_MAIN_VARIANT (t2)))
			   ? t1
			   : NULL);
    }
  set_c_expr_source_range (&ret, start, exp2.get_finish ());
  return ret;
}
/* Parse a binary expression; that is, a logical-OR-expression (C90
6.3.5-6.3.14, C99 6.5.5-6.5.14, C11 6.5.5-6.5.14). If AFTER is not
NULL then it is an Objective-C message expression which is the
primary-expression starting the expression as an initializer.
OMP_ATOMIC_LHS is NULL, unless parsing OpenMP #pragma omp atomic,
when it should be the unfolded lhs. In a valid OpenMP source,
one of the operands of the toplevel binary expression must be equal
to it. In that case, just return a build2 created binary operation
rather than result of parser_build_binary_op.
multiplicative-expression:
cast-expression
multiplicative-expression * cast-expression
multiplicative-expression / cast-expression
multiplicative-expression % cast-expression
additive-expression:
multiplicative-expression
additive-expression + multiplicative-expression
additive-expression - multiplicative-expression
shift-expression:
additive-expression
shift-expression << additive-expression
shift-expression >> additive-expression
relational-expression:
shift-expression
relational-expression < shift-expression
relational-expression > shift-expression
relational-expression <= shift-expression
relational-expression >= shift-expression
equality-expression:
relational-expression
equality-expression == relational-expression
equality-expression != relational-expression
AND-expression:
equality-expression
AND-expression & equality-expression
exclusive-OR-expression:
AND-expression
exclusive-OR-expression ^ AND-expression
inclusive-OR-expression:
exclusive-OR-expression
inclusive-OR-expression | exclusive-OR-expression
logical-AND-expression:
inclusive-OR-expression
logical-AND-expression && inclusive-OR-expression
logical-OR-expression:
logical-AND-expression
logical-OR-expression || logical-AND-expression
*/
static struct c_expr
c_parser_binary_expression (c_parser *parser, struct c_expr *after,
			    tree omp_atomic_lhs)
{
  /* A binary expression is parsed using operator-precedence parsing,
     with the operands being cast expressions.  All the binary
     operators are left-associative.  Thus a binary expression is of
     form:
     E0 op1 E1 op2 E2 ...
     which we represent on a stack.  On the stack, the precedence
     levels are strictly increasing.  When a new operator is
     encountered of higher precedence than that at the top of the
     stack, it is pushed; its LHS is the top expression, and its RHS
     is everything parsed until it is popped.  When a new operator is
     encountered with precedence less than or equal to that at the top
     of the stack, triples E[i-1] op[i] E[i] are popped and replaced
     by the result of the operation until the operator at the top of
     the stack has lower precedence than the new operator or there is
     only one element on the stack; then the top expression is the LHS
     of the new operator.  In the case of logical AND and OR
     expressions, we also need to adjust c_inhibit_evaluation_warnings
     as appropriate when the operators are pushed and popped.  */
  struct {
    /* The expression at this stack level.  */
    struct c_expr expr;
    /* The precedence of the operator on its left, PREC_NONE at the
       bottom of the stack.  */
    enum c_parser_prec prec;
    /* The operation on its left.  */
    enum tree_code op;
    /* The source location of this operation.  */
    location_t loc;
    /* The sizeof argument if expr.original_code == SIZEOF_EXPR.  */
    tree sizeof_arg;
  } stack[NUM_PRECS];
  int sp;
  /* Location of the binary operator.  */
  location_t binary_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
  /* POP reduces the top two stack entries into one: it first undoes any
     warning suppression installed when && or || was pushed, diagnoses
     the sizeof (ptr) / sizeof (*ptr) pattern, then builds the binary
     operation (or, for the OpenMP atomic case at sp == 1, a raw build2
     so the lhs is preserved verbatim).  */
#define POP \
  do { \
    switch (stack[sp].op) \
      { \
      case TRUTH_ANDIF_EXPR: \
	c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value \
					  == truthvalue_false_node); \
	break; \
      case TRUTH_ORIF_EXPR: \
	c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value \
					  == truthvalue_true_node); \
	break; \
      case TRUNC_DIV_EXPR: \
	if (stack[sp - 1].expr.original_code == SIZEOF_EXPR \
	    && stack[sp].expr.original_code == SIZEOF_EXPR) \
	  { \
	    tree type0 = stack[sp - 1].sizeof_arg; \
	    tree type1 = stack[sp].sizeof_arg; \
	    tree first_arg = type0; \
	    if (!TYPE_P (type0)) \
	      type0 = TREE_TYPE (type0); \
	    if (!TYPE_P (type1)) \
	      type1 = TREE_TYPE (type1); \
	    if (POINTER_TYPE_P (type0) \
		&& comptypes (TREE_TYPE (type0), type1) \
		&& !(TREE_CODE (first_arg) == PARM_DECL \
		     && C_ARRAY_PARAMETER (first_arg) \
		     && warn_sizeof_array_argument)) \
	      if (warning_at (stack[sp].loc, OPT_Wsizeof_pointer_div, \
			      "division %<sizeof (%T) / sizeof (%T)%> does " \
			      "not compute the number of array elements", \
			      type0, type1)) \
		if (DECL_P (first_arg)) \
		  inform (DECL_SOURCE_LOCATION (first_arg), \
			  "first %<sizeof%> operand was declared here"); \
	  } \
	break; \
      default: \
	break; \
      } \
    stack[sp - 1].expr \
      = convert_lvalue_to_rvalue (stack[sp - 1].loc, \
				  stack[sp - 1].expr, true, true); \
    stack[sp].expr \
      = convert_lvalue_to_rvalue (stack[sp].loc, \
				  stack[sp].expr, true, true); \
    if (__builtin_expect (omp_atomic_lhs != NULL_TREE, 0) && sp == 1 \
	&& c_parser_peek_token (parser)->type == CPP_SEMICOLON \
	&& ((1 << stack[sp].prec) \
	    & ((1 << PREC_BITOR) | (1 << PREC_BITXOR) | (1 << PREC_BITAND) \
	       | (1 << PREC_SHIFT) | (1 << PREC_ADD) | (1 << PREC_MULT))) \
	&& stack[sp].op != TRUNC_MOD_EXPR \
	&& stack[0].expr.value != error_mark_node \
	&& stack[1].expr.value != error_mark_node \
	&& (c_tree_equal (stack[0].expr.value, omp_atomic_lhs) \
	    || c_tree_equal (stack[1].expr.value, omp_atomic_lhs))) \
      stack[0].expr.value \
	= build2 (stack[1].op, TREE_TYPE (stack[0].expr.value), \
		  stack[0].expr.value, stack[1].expr.value); \
    else \
      stack[sp - 1].expr = parser_build_binary_op (stack[sp].loc, \
						   stack[sp].op, \
						   stack[sp - 1].expr, \
						   stack[sp].expr); \
    sp--; \
  } while (0)
  gcc_assert (!after || c_dialect_objc ());
  stack[0].loc = c_parser_peek_token (parser)->location;
  stack[0].expr = c_parser_cast_expression (parser, after);
  stack[0].prec = PREC_NONE;
  stack[0].sizeof_arg = c_last_sizeof_arg;
  sp = 0;
  while (true)
    {
      enum c_parser_prec oprec;
      enum tree_code ocode;
      source_range src_range;
      if (parser->error)
	goto out;
      /* Classify the next token: binary operators map to a precedence
	 and a tree code; anything else ends the expression.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_MULT:
	  oprec = PREC_MULT;
	  ocode = MULT_EXPR;
	  break;
	case CPP_DIV:
	  oprec = PREC_MULT;
	  ocode = TRUNC_DIV_EXPR;
	  break;
	case CPP_MOD:
	  oprec = PREC_MULT;
	  ocode = TRUNC_MOD_EXPR;
	  break;
	case CPP_PLUS:
	  oprec = PREC_ADD;
	  ocode = PLUS_EXPR;
	  break;
	case CPP_MINUS:
	  oprec = PREC_ADD;
	  ocode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT:
	  oprec = PREC_SHIFT;
	  ocode = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT:
	  oprec = PREC_SHIFT;
	  ocode = RSHIFT_EXPR;
	  break;
	case CPP_LESS:
	  oprec = PREC_REL;
	  ocode = LT_EXPR;
	  break;
	case CPP_GREATER:
	  oprec = PREC_REL;
	  ocode = GT_EXPR;
	  break;
	case CPP_LESS_EQ:
	  oprec = PREC_REL;
	  ocode = LE_EXPR;
	  break;
	case CPP_GREATER_EQ:
	  oprec = PREC_REL;
	  ocode = GE_EXPR;
	  break;
	case CPP_EQ_EQ:
	  oprec = PREC_EQ;
	  ocode = EQ_EXPR;
	  break;
	case CPP_NOT_EQ:
	  oprec = PREC_EQ;
	  ocode = NE_EXPR;
	  break;
	case CPP_AND:
	  oprec = PREC_BITAND;
	  ocode = BIT_AND_EXPR;
	  break;
	case CPP_XOR:
	  oprec = PREC_BITXOR;
	  ocode = BIT_XOR_EXPR;
	  break;
	case CPP_OR:
	  oprec = PREC_BITOR;
	  ocode = BIT_IOR_EXPR;
	  break;
	case CPP_AND_AND:
	  oprec = PREC_LOGAND;
	  ocode = TRUTH_ANDIF_EXPR;
	  break;
	case CPP_OR_OR:
	  oprec = PREC_LOGOR;
	  ocode = TRUTH_ORIF_EXPR;
	  break;
	default:
	  /* Not a binary operator, so end of the binary
	     expression.  */
	  goto out;
	}
      binary_loc = c_parser_peek_token (parser)->location;
      while (oprec <= stack[sp].prec)
	POP;
      c_parser_consume_token (parser);
      /* For the short-circuit operators, convert the LHS to a truth
	 value now and suppress warnings in an RHS that is statically
	 known not to be evaluated; POP undoes the suppression.  */
      switch (ocode)
	{
	case TRUTH_ANDIF_EXPR:
	  src_range = stack[sp].expr.src_range;
	  stack[sp].expr
	    = convert_lvalue_to_rvalue (stack[sp].loc,
					stack[sp].expr, true, true);
	  stack[sp].expr.value = c_objc_common_truthvalue_conversion
	    (stack[sp].loc, default_conversion (stack[sp].expr.value));
	  c_inhibit_evaluation_warnings += (stack[sp].expr.value
					    == truthvalue_false_node);
	  set_c_expr_source_range (&stack[sp].expr, src_range);
	  break;
	case TRUTH_ORIF_EXPR:
	  src_range = stack[sp].expr.src_range;
	  stack[sp].expr
	    = convert_lvalue_to_rvalue (stack[sp].loc,
					stack[sp].expr, true, true);
	  stack[sp].expr.value = c_objc_common_truthvalue_conversion
	    (stack[sp].loc, default_conversion (stack[sp].expr.value));
	  c_inhibit_evaluation_warnings += (stack[sp].expr.value
					    == truthvalue_true_node);
	  set_c_expr_source_range (&stack[sp].expr, src_range);
	  break;
	default:
	  break;
	}
      sp++;
      stack[sp].loc = binary_loc;
      stack[sp].expr = c_parser_cast_expression (parser, NULL);
      stack[sp].prec = oprec;
      stack[sp].op = ocode;
      stack[sp].sizeof_arg = c_last_sizeof_arg;
    }
 out:
  /* Reduce everything remaining on the stack; the result lands in
     stack[0].  */
  while (sp > 0)
    POP;
  return stack[0].expr;
#undef POP
}
/* Parse a cast expression (C90 6.3.4, C99 6.5.4, C11 6.5.4). If AFTER
is not NULL then it is an Objective-C message expression which is the
primary-expression starting the expression as an initializer.
cast-expression:
unary-expression
( type-name ) unary-expression
*/
static struct c_expr
c_parser_cast_expression (c_parser *parser, struct c_expr *after)
{
  location_t cast_loc = c_parser_peek_token (parser)->location;
  gcc_assert (!after || c_dialect_objc ());
  if (after)
    return c_parser_postfix_expression_after_primary (parser,
						      cast_loc, *after);
  /* If the expression begins with a parenthesized type name, it may
     be either a cast or a compound literal; we need to see whether
     the next character is '{' to tell the difference.  If not, it is
     an unary expression.  Full detection of unknown typenames here
     would require a 3-token lookahead.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      struct c_type_name *type_name;
      struct c_expr ret;
      struct c_expr expr;
      matching_parens parens;
      parens.consume_open (parser);
      type_name = c_parser_type_name (parser, true);
      parens.skip_until_found_close (parser);
      if (type_name == NULL)
	{
	  ret.set_error ();
	  ret.original_code = ERROR_MARK;
	  ret.original_type = NULL;
	  return ret;
	}
      /* Save casted types in the function's used types hash table.  */
      used_types_insert (type_name->specs->type);
      /* "( type-name ) {" begins a compound literal, not a cast.  */
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	return c_parser_postfix_expression_after_paren_type (parser, type_name,
							     cast_loc);
      if (type_name->specs->alignas_p)
	error_at (type_name->specs->locations[cdw_alignas],
		  "alignment specified for type name in cast");
      {
	location_t expr_loc = c_parser_peek_token (parser)->location;
	expr = c_parser_cast_expression (parser, NULL);
	expr = convert_lvalue_to_rvalue (expr_loc, expr, true, true);
      }
      ret.value = c_cast_expr (cast_loc, type_name, expr.value);
      if (ret.value && expr.value)
	set_c_expr_source_range (&ret, cast_loc, expr.get_finish ());
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      return ret;
    }
  else
    return c_parser_unary_expression (parser);
}
/* Parse an unary expression (C90 6.3.3, C99 6.5.3, C11 6.5.3).
unary-expression:
postfix-expression
++ unary-expression
-- unary-expression
unary-operator cast-expression
sizeof unary-expression
sizeof ( type-name )
unary-operator: one of
& * + - ~ !
GNU extensions:
unary-expression:
__alignof__ unary-expression
__alignof__ ( type-name )
&& identifier
(C11 permits _Alignof with type names only.)
unary-operator: one of
__extension__ __real__ __imag__
Transactional Memory:
unary-expression:
transaction-expression
In addition, the GNU syntax treats ++ and -- as unary operators, so
they may be applied to cast expressions with errors for non-lvalues
given later. */
static struct c_expr
c_parser_unary_expression (c_parser *parser)
{
  int ext;
  struct c_expr ret, op;
  location_t op_loc = c_parser_peek_token (parser)->location;
  location_t exp_loc;
  location_t finish;
  ret.original_code = ERROR_MARK;
  ret.original_type = NULL;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_PLUS_PLUS:
      c_parser_consume_token (parser);
      exp_loc = c_parser_peek_token (parser)->location;
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_read_conversion (exp_loc, op);
      return parser_build_unary_op (op_loc, PREINCREMENT_EXPR, op);
    case CPP_MINUS_MINUS:
      c_parser_consume_token (parser);
      exp_loc = c_parser_peek_token (parser)->location;
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_read_conversion (exp_loc, op);
      return parser_build_unary_op (op_loc, PREDECREMENT_EXPR, op);
    case CPP_AND:
      /* Address-of: the operand is marked as read but no
	 array/function decay is applied to it.  */
      c_parser_consume_token (parser);
      op = c_parser_cast_expression (parser, NULL);
      mark_exp_read (op.value);
      return parser_build_unary_op (op_loc, ADDR_EXPR, op);
    case CPP_MULT:
      {
	/* Unary '*': pointer dereference.  */
	c_parser_consume_token (parser);
	exp_loc = c_parser_peek_token (parser)->location;
	op = c_parser_cast_expression (parser, NULL);
	finish = op.get_finish ();
	op = convert_lvalue_to_rvalue (exp_loc, op, true, true);
	location_t combined_loc = make_location (op_loc, op_loc, finish);
	ret.value = build_indirect_ref (combined_loc, op.value, RO_UNARY_STAR);
	ret.src_range.m_start = op_loc;
	ret.src_range.m_finish = finish;
	return ret;
      }
    case CPP_PLUS:
      if (!c_dialect_objc () && !in_system_header_at (input_location))
	warning_at (op_loc,
		    OPT_Wtraditional,
		    "traditional C rejects the unary plus operator");
      c_parser_consume_token (parser);
      exp_loc = c_parser_peek_token (parser)->location;
      op = c_parser_cast_expression (parser, NULL);
      op = convert_lvalue_to_rvalue (exp_loc, op, true, true);
      return parser_build_unary_op (op_loc, CONVERT_EXPR, op);
    case CPP_MINUS:
      c_parser_consume_token (parser);
      exp_loc = c_parser_peek_token (parser)->location;
      op = c_parser_cast_expression (parser, NULL);
      op = convert_lvalue_to_rvalue (exp_loc, op, true, true);
      return parser_build_unary_op (op_loc, NEGATE_EXPR, op);
    case CPP_COMPL:
      c_parser_consume_token (parser);
      exp_loc = c_parser_peek_token (parser)->location;
      op = c_parser_cast_expression (parser, NULL);
      op = convert_lvalue_to_rvalue (exp_loc, op, true, true);
      return parser_build_unary_op (op_loc, BIT_NOT_EXPR, op);
    case CPP_NOT:
      c_parser_consume_token (parser);
      exp_loc = c_parser_peek_token (parser)->location;
      op = c_parser_cast_expression (parser, NULL);
      op = convert_lvalue_to_rvalue (exp_loc, op, true, true);
      return parser_build_unary_op (op_loc, TRUTH_NOT_EXPR, op);
    case CPP_AND_AND:
      /* Refer to the address of a label as a pointer.  */
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  ret.value = finish_label_address_expr
	    (c_parser_peek_token (parser)->value, op_loc);
	  set_c_expr_source_range (&ret, op_loc,
				   c_parser_peek_token (parser)->get_finish ());
	  c_parser_consume_token (parser);
	}
      else
	{
	  c_parser_error (parser, "expected identifier");
	  ret.set_error ();
	}
      return ret;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_SIZEOF:
	  return c_parser_sizeof_expression (parser);
	case RID_ALIGNOF:
	  return c_parser_alignof_expression (parser);
	case RID_EXTENSION:
	  /* __extension__: parse the operand with extension
	     diagnostics disabled, then restore them.  */
	  c_parser_consume_token (parser);
	  ext = disable_extension_diagnostics ();
	  ret = c_parser_cast_expression (parser, NULL);
	  restore_extension_diagnostics (ext);
	  return ret;
	case RID_REALPART:
	  c_parser_consume_token (parser);
	  exp_loc = c_parser_peek_token (parser)->location;
	  op = c_parser_cast_expression (parser, NULL);
	  op = default_function_array_conversion (exp_loc, op);
	  return parser_build_unary_op (op_loc, REALPART_EXPR, op);
	case RID_IMAGPART:
	  c_parser_consume_token (parser);
	  exp_loc = c_parser_peek_token (parser)->location;
	  op = c_parser_cast_expression (parser, NULL);
	  op = default_function_array_conversion (exp_loc, op);
	  return parser_build_unary_op (op_loc, IMAGPART_EXPR, op);
	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  return c_parser_transaction_expression (parser,
	      c_parser_peek_token (parser)->keyword);
	default:
	  return c_parser_postfix_expression (parser);
	}
    default:
      return c_parser_postfix_expression (parser);
    }
}
/* Parse a sizeof expression. */
static struct c_expr
c_parser_sizeof_expression (c_parser *parser)
{
  struct c_expr expr;
  struct c_expr result;
  location_t expr_loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_SIZEOF));
  location_t start;
  location_t finish = UNKNOWN_LOCATION;
  start = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  /* The operand of sizeof is not evaluated: suppress warnings and
     note (via in_sizeof) that we are inside sizeof while parsing it.
     Both counters are decremented exactly once on every exit path.  */
  c_inhibit_evaluation_warnings++;
  in_sizeof++;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      /* Either sizeof ( type-name ) or sizeof unary-expression
	 starting with a compound literal.  */
      struct c_type_name *type_name;
      matching_parens parens;
      parens.consume_open (parser);
      expr_loc = c_parser_peek_token (parser)->location;
      type_name = c_parser_type_name (parser, true);
      parens.skip_until_found_close (parser);
      finish = parser->tokens_buf[0].location;
      if (type_name == NULL)
	{
	  struct c_expr ret;
	  c_inhibit_evaluation_warnings--;
	  in_sizeof--;
	  ret.set_error ();
	  ret.original_code = ERROR_MARK;
	  ret.original_type = NULL;
	  return ret;
	}
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  expr = c_parser_postfix_expression_after_paren_type (parser,
							       type_name,
							       expr_loc);
	  finish = expr.get_finish ();
	  /* A compound literal is an expression operand, so jump into
	     the expression handling in the else branch below.  */
	  goto sizeof_expr;
	}
      /* sizeof ( type-name ).  */
      if (type_name->specs->alignas_p)
	error_at (type_name->specs->locations[cdw_alignas],
		  "alignment specified for type name in %<sizeof%>");
      c_inhibit_evaluation_warnings--;
      in_sizeof--;
      result = c_expr_sizeof_type (expr_loc, type_name);
    }
  else
    {
      expr_loc = c_parser_peek_token (parser)->location;
      expr = c_parser_unary_expression (parser);
      finish = expr.get_finish ();
    sizeof_expr:
      c_inhibit_evaluation_warnings--;
      in_sizeof--;
      mark_exp_read (expr.value);
      if (TREE_CODE (expr.value) == COMPONENT_REF
	  && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1)))
	error_at (expr_loc, "%<sizeof%> applied to a bit-field");
      result = c_expr_sizeof_expr (expr_loc, expr);
    }
  if (finish != UNKNOWN_LOCATION)
    set_c_expr_source_range (&result, start, finish);
  return result;
}
/* Parse an alignof expression. */
static struct c_expr
c_parser_alignof_expression (c_parser *parser)
{
  struct c_expr expr;
  location_t start_loc = c_parser_peek_token (parser)->location;
  location_t end_loc;
  tree alignof_spelling = c_parser_peek_token (parser)->value;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNOF));
  bool is_c11_alignof = strcmp (IDENTIFIER_POINTER (alignof_spelling),
				"_Alignof") == 0;
  /* A diagnostic is not required for the use of this identifier in
     the implementation namespace; only diagnose it for the C11
     spelling because of existing code using the other spellings.  */
  if (is_c11_alignof)
    {
      if (flag_isoc99)
	pedwarn_c99 (start_loc, OPT_Wpedantic, "ISO C99 does not support %qE",
		     alignof_spelling);
      else
	pedwarn_c99 (start_loc, OPT_Wpedantic, "ISO C90 does not support %qE",
		     alignof_spelling);
    }
  c_parser_consume_token (parser);
  /* The operand of alignof is not evaluated; both counters are
     decremented exactly once on every exit path.  */
  c_inhibit_evaluation_warnings++;
  in_alignof++;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      /* Either __alignof__ ( type-name ) or __alignof__
	 unary-expression starting with a compound literal.  */
      location_t loc;
      struct c_type_name *type_name;
      struct c_expr ret;
      matching_parens parens;
      parens.consume_open (parser);
      loc = c_parser_peek_token (parser)->location;
      type_name = c_parser_type_name (parser, true);
      end_loc = c_parser_peek_token (parser)->location;
      parens.skip_until_found_close (parser);
      if (type_name == NULL)
	{
	  /* Reuse RET declared above for the error result (a second,
	     shadowing declaration here would trip -Wshadow).  */
	  c_inhibit_evaluation_warnings--;
	  in_alignof--;
	  ret.set_error ();
	  ret.original_code = ERROR_MARK;
	  ret.original_type = NULL;
	  return ret;
	}
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  /* A compound literal is an expression operand; share the
	     expression path in the else branch below.  */
	  expr = c_parser_postfix_expression_after_paren_type (parser,
							       type_name,
							       loc);
	  goto alignof_expr;
	}
      /* alignof ( type-name ).  */
      if (type_name->specs->alignas_p)
	error_at (type_name->specs->locations[cdw_alignas],
		  "alignment specified for type name in %qE",
		  alignof_spelling);
      c_inhibit_evaluation_warnings--;
      in_alignof--;
      ret.value = c_sizeof_or_alignof_type (loc, groktypename (type_name,
							       NULL, NULL),
					    false, is_c11_alignof, 1);
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      set_c_expr_source_range (&ret, start_loc, end_loc);
      return ret;
    }
  else
    {
      struct c_expr ret;
      expr = c_parser_unary_expression (parser);
      end_loc = expr.src_range.m_finish;
    alignof_expr:
      mark_exp_read (expr.value);
      c_inhibit_evaluation_warnings--;
      in_alignof--;
      /* "_Alignof (expression)" is a GNU extension; C11 permits
	 _Alignof with type names only.  */
      if (is_c11_alignof)
	pedwarn (start_loc,
		 OPT_Wpedantic, "ISO C does not allow %<%E (expression)%>",
		 alignof_spelling);
      ret.value = c_alignof_expr (start_loc, expr.value);
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      set_c_expr_source_range (&ret, start_loc, end_loc);
      return ret;
    }
}
/* Helper function to read arguments of builtins which are interfaces
for the middle-end nodes like COMPLEX_EXPR, VEC_PERM_EXPR and
others. The name of the builtin is passed using BNAME parameter.
Function returns true if there were no errors while parsing and
stores the arguments in CEXPR_LIST. If it returns true,
*OUT_CLOSE_PAREN_LOC is written to with the location of the closing
parenthesis. */
static bool
c_parser_get_builtin_args (c_parser *parser, const char *bname,
			   vec<c_expr_t, va_gc> **ret_cexpr_list,
			   bool choose_expr_p,
			   location_t *out_close_paren_loc)
{
  location_t start_loc = c_parser_peek_token (parser)->location;
  *ret_cexpr_list = NULL;
  /* A builtin name not followed by '(' is an attempt to take its
     address, which is not allowed.  */
  if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN))
    {
      error_at (start_loc, "cannot take address of %qs", bname);
      return false;
    }
  c_parser_consume_token (parser);
  /* Empty argument list: succeed with *RET_CEXPR_LIST left NULL.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      *out_close_paren_loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      return true;
    }
  /* Parse the first argument; for __builtin_choose_expr the constant
     condition must be folded, so force folding around it.  */
  bool saved_folding = force_folding_builtin_constant_p;
  force_folding_builtin_constant_p |= choose_expr_p;
  c_expr_t arg = c_parser_expr_no_commas (parser, NULL);
  force_folding_builtin_constant_p = saved_folding;
  vec<c_expr_t, va_gc> *args;
  vec_alloc (args, 1);
  vec_safe_push (args, arg);
  /* Remaining comma-separated arguments.  */
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      c_parser_consume_token (parser);
      arg = c_parser_expr_no_commas (parser, NULL);
      vec_safe_push (args, arg);
    }
  *out_close_paren_loc = c_parser_peek_token (parser)->location;
  if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
    return false;
  *ret_cexpr_list = args;
  return true;
}
/* This represents a single generic-association within a _Generic
   selection (C11 6.5.1.1), i.e. one "type-name : expression" or
   "default : expression" entry.  */
struct c_generic_association
{
  /* The location of the starting token of the type.  */
  location_t type_location;
  /* The association's type, or NULL_TREE for 'default'.  */
  tree type;
  /* The association's expression.  */
  struct c_expr expression;
};
/* Parse a generic-selection.  (C11 6.5.1.1).
   
     generic-selection:
       _Generic ( assignment-expression , generic-assoc-list )
     generic-assoc-list:
       generic-association
       generic-assoc-list , generic-association
     generic-association:
       type-name : assignment-expression
       default : assignment-expression

   Returns the expression of the association whose type is compatible
   with the type of the controlling expression (or of the 'default'
   association when no type matches), or an error expression on any
   parse or semantic error.  */
static struct c_expr
c_parser_generic_selection (c_parser *parser)
{
  struct c_expr selector, error_expr;
  tree selector_type;
  struct c_generic_association matched_assoc;
  bool match_found = false;
  location_t generic_loc, selector_loc;

  /* Value returned on all error paths.  */
  error_expr.original_code = ERROR_MARK;
  error_expr.original_type = NULL;
  error_expr.set_error ();
  matched_assoc.type_location = UNKNOWN_LOCATION;
  matched_assoc.type = NULL_TREE;
  matched_assoc.expression = error_expr;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_GENERIC));
  generic_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  /* _Generic is a C11 feature; pedwarn in earlier standard modes.  */
  if (flag_isoc99)
    pedwarn_c99 (generic_loc, OPT_Wpedantic,
		 "ISO C99 does not support %<_Generic%>");
  else
    pedwarn_c99 (generic_loc, OPT_Wpedantic,
		 "ISO C90 does not support %<_Generic%>");

  matching_parens parens;
  if (!parens.require_open (parser))
    return error_expr;

  /* The controlling expression is unevaluated, so suppress evaluation
     warnings while it is parsed and converted.  */
  c_inhibit_evaluation_warnings++;
  selector_loc = c_parser_peek_token (parser)->location;
  selector = c_parser_expr_no_commas (parser, NULL);
  selector = default_function_array_conversion (selector_loc, selector);
  c_inhibit_evaluation_warnings--;
  if (selector.value == error_mark_node)
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return selector;
    }
  selector_type = TREE_TYPE (selector.value);
  /* In ISO C terms, rvalues (including the controlling expression of
     _Generic) do not have qualified types.  */
  if (TREE_CODE (selector_type) != ARRAY_TYPE)
    selector_type = TYPE_MAIN_VARIANT (selector_type);
  /* In ISO C terms, _Noreturn is not part of the type of expressions
     such as &abort, but in GCC it is represented internally as a type
     qualifier.  */
  if (FUNCTION_POINTER_TYPE_P (selector_type)
      && TYPE_QUALS (TREE_TYPE (selector_type)) != TYPE_UNQUALIFIED)
    selector_type
      = build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (selector_type)));

  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return error_expr;
    }

  /* Parse the associations one by one, diagnosing constraint
     violations as they are seen but continuing to parse for better
     error recovery.  */
  auto_vec<c_generic_association> associations;
  while (1)
    {
      struct c_generic_association assoc, *iter;
      unsigned int ix;
      c_token *token = c_parser_peek_token (parser);

      assoc.type_location = token->location;
      if (token->type == CPP_KEYWORD && token->keyword == RID_DEFAULT)
	{
	  /* A 'default' association is represented by a NULL_TREE
	     type.  */
	  c_parser_consume_token (parser);
	  assoc.type = NULL_TREE;
	}
      else
	{
	  struct c_type_name *type_name;

	  type_name = c_parser_type_name (parser);
	  if (type_name == NULL)
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      return error_expr;
	    }
	  assoc.type = groktypename (type_name, NULL, NULL);
	  if (assoc.type == error_mark_node)
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      return error_expr;
	    }

	  /* C11 constraints: each association names a complete object
	     type that is not variably modified.  */
	  if (TREE_CODE (assoc.type) == FUNCTION_TYPE)
	    error_at (assoc.type_location,
		      "%<_Generic%> association has function type");
	  else if (!COMPLETE_TYPE_P (assoc.type))
	    error_at (assoc.type_location,
		      "%<_Generic%> association has incomplete type");

	  if (variably_modified_type_p (assoc.type, NULL_TREE))
	    error_at (assoc.type_location,
		      "%<_Generic%> association has "
		      "variable length type");
	}

      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return error_expr;
	}

      assoc.expression = c_parser_expr_no_commas (parser, NULL);
      if (assoc.expression.value == error_mark_node)
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return error_expr;
	}

      /* Diagnose a duplicate 'default' or two mutually compatible
	 association types against everything parsed so far.  */
      for (ix = 0; associations.iterate (ix, &iter); ++ix)
	{
	  if (assoc.type == NULL_TREE)
	    {
	      if (iter->type == NULL_TREE)
		{
		  error_at (assoc.type_location,
			    "duplicate %<default%> case in %<_Generic%>");
		  inform (iter->type_location, "original %<default%> is here");
		}
	    }
	  else if (iter->type != NULL_TREE)
	    {
	      if (comptypes (assoc.type, iter->type))
		{
		  error_at (assoc.type_location,
			    "%<_Generic%> specifies two compatible types");
		  inform (iter->type_location, "compatible type is here");
		}
	    }
	}

      if (assoc.type == NULL_TREE)
	{
	  /* Record 'default' only if nothing has matched yet; a type
	     match recorded later supersedes it (see below).  */
	  if (!match_found)
	    {
	      matched_assoc = assoc;
	      match_found = true;
	    }
	}
      else if (comptypes (assoc.type, selector_type))
	{
	  /* A type match replaces a previously recorded 'default';
	     a second type match is diagnosed as an error.  */
	  if (!match_found || matched_assoc.type == NULL_TREE)
	    {
	      matched_assoc = assoc;
	      match_found = true;
	    }
	  else
	    {
	      error_at (assoc.type_location,
			"%<_Generic%> selector matches multiple associations");
	      inform (matched_assoc.type_location,
		      "other match is here");
	    }
	}

      associations.safe_push (assoc);

      if (c_parser_peek_token (parser)->type != CPP_COMMA)
	break;
      c_parser_consume_token (parser);
    }

  if (!parens.require_close (parser))
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return error_expr;
    }

  if (!match_found)
    {
      error_at (selector_loc, "%<_Generic%> selector of type %qT is not "
		"compatible with any association",
		selector_type);
      return error_expr;
    }

  return matched_assoc.expression;
}
/* Check the validity of a function pointer argument *EXPR (argument
   position POS) to __builtin_tgmath.  Return the number of function
   arguments if possibly valid; return 0 having reported an error if
   not valid.  */
static unsigned int
check_tgmath_function (c_expr *expr, unsigned int pos)
{
  tree fntype = TREE_TYPE (expr->value);

  /* The argument must be a pointer to a prototyped, non-variadic
     function that takes at least one argument.  */
  if (!FUNCTION_POINTER_TYPE_P (fntype))
    {
      error_at (expr->get_location (),
		"argument %u of %<__builtin_tgmath%> is not a function pointer",
		pos);
      return 0;
    }
  fntype = TREE_TYPE (fntype);
  if (!prototype_p (fntype))
    {
      error_at (expr->get_location (),
		"argument %u of %<__builtin_tgmath%> is unprototyped", pos);
      return 0;
    }
  if (stdarg_p (fntype))
    {
      error_at (expr->get_location (),
		"argument %u of %<__builtin_tgmath%> has variable arguments",
		pos);
      return 0;
    }

  /* Count the fixed parameters, stopping at the terminating void.  */
  unsigned int count = 0;
  function_args_iterator args_iter;
  tree parm;
  FOREACH_FUNCTION_ARGS (fntype, parm, args_iter)
    {
      if (parm == void_type_node)
	break;
      count++;
    }
  if (count == 0)
    {
      error_at (expr->get_location (),
		"argument %u of %<__builtin_tgmath%> has no arguments", pos);
      return 0;
    }
  return count;
}
/* Ways in which a parameter or return value of a type-generic macro
   may vary between the different functions the macro may call:
   tgmath_fixed means the type is the same in every function;
   tgmath_real means it varies between functions but only over real
   (non-complex) types; tgmath_complex means it varies with complex
   types among the variants.  */
enum tgmath_parm_kind
  {
    tgmath_fixed, tgmath_real, tgmath_complex
  };
/* Parse a postfix expression (C90 6.3.1-6.3.2, C99 6.5.1-6.5.2,
C11 6.5.1-6.5.2). Compound literals aren't handled here; callers have to
call c_parser_postfix_expression_after_paren_type on encountering them.
postfix-expression:
primary-expression
postfix-expression [ expression ]
postfix-expression ( argument-expression-list[opt] )
postfix-expression . identifier
postfix-expression -> identifier
postfix-expression ++
postfix-expression --
( type-name ) { initializer-list }
( type-name ) { initializer-list , }
argument-expression-list:
argument-expression
argument-expression-list , argument-expression
primary-expression:
identifier
constant
string-literal
( expression )
generic-selection
GNU extensions:
primary-expression:
__func__
(treated as a keyword in GNU C)
__FUNCTION__
__PRETTY_FUNCTION__
( compound-statement )
__builtin_va_arg ( assignment-expression , type-name )
__builtin_offsetof ( type-name , offsetof-member-designator )
__builtin_choose_expr ( assignment-expression ,
assignment-expression ,
assignment-expression )
__builtin_types_compatible_p ( type-name , type-name )
__builtin_tgmath ( expr-list )
__builtin_complex ( assignment-expression , assignment-expression )
__builtin_shuffle ( assignment-expression , assignment-expression )
__builtin_shuffle ( assignment-expression ,
assignment-expression ,
assignment-expression, )
offsetof-member-designator:
identifier
offsetof-member-designator . identifier
offsetof-member-designator [ expression ]
Objective-C:
primary-expression:
[ objc-receiver objc-message-args ]
@selector ( objc-selector-arg )
@protocol ( identifier )
@encode ( type-name )
objc-string-literal
Classname . identifier
*/
static struct c_expr
c_parser_postfix_expression (c_parser *parser)
{
struct c_expr expr, e1;
struct c_type_name *t1, *t2;
location_t loc = c_parser_peek_token (parser)->location;
source_range tok_range = c_parser_peek_token (parser)->get_range ();
expr.original_code = ERROR_MARK;
expr.original_type = NULL;
switch (c_parser_peek_token (parser)->type)
{
case CPP_NUMBER:
expr.value = c_parser_peek_token (parser)->value;
set_c_expr_source_range (&expr, tok_range);
loc = c_parser_peek_token (parser)->location;
c_parser_consume_token (parser);
if (TREE_CODE (expr.value) == FIXED_CST
&& !targetm.fixed_point_supported_p ())
{
error_at (loc, "fixed-point types not supported for this target");
expr.set_error ();
}
break;
case CPP_CHAR:
case CPP_CHAR16:
case CPP_CHAR32:
case CPP_WCHAR:
expr.value = c_parser_peek_token (parser)->value;
/* For the purpose of warning when a pointer is compared with
a zero character constant. */
expr.original_type = char_type_node;
set_c_expr_source_range (&expr, tok_range);
c_parser_consume_token (parser);
break;
case CPP_STRING:
case CPP_STRING16:
case CPP_STRING32:
case CPP_WSTRING:
case CPP_UTF8STRING:
expr.value = c_parser_peek_token (parser)->value;
set_c_expr_source_range (&expr, tok_range);
expr.original_code = STRING_CST;
c_parser_consume_token (parser);
break;
case CPP_OBJC_STRING:
gcc_assert (c_dialect_objc ());
expr.value
= objc_build_string_object (c_parser_peek_token (parser)->value);
set_c_expr_source_range (&expr, tok_range);
c_parser_consume_token (parser);
break;
case CPP_NAME:
switch (c_parser_peek_token (parser)->id_kind)
{
case C_ID_ID:
{
tree id = c_parser_peek_token (parser)->value;
c_parser_consume_token (parser);
expr.value = build_external_ref (loc, id,
(c_parser_peek_token (parser)->type
== CPP_OPEN_PAREN),
&expr.original_type);
set_c_expr_source_range (&expr, tok_range);
break;
}
case C_ID_CLASSNAME:
{
/* Here we parse the Objective-C 2.0 Class.name dot
syntax. */
tree class_name = c_parser_peek_token (parser)->value;
tree component;
c_parser_consume_token (parser);
gcc_assert (c_dialect_objc ());
if (!c_parser_require (parser, CPP_DOT, "expected %<.%>"))
{
expr.set_error ();
break;
}
if (c_parser_next_token_is_not (parser, CPP_NAME))
{
c_parser_error (parser, "expected identifier");
expr.set_error ();
break;
}
c_token *component_tok = c_parser_peek_token (parser);
component = component_tok->value;
location_t end_loc = component_tok->get_finish ();
c_parser_consume_token (parser);
expr.value = objc_build_class_component_ref (class_name,
component);
set_c_expr_source_range (&expr, loc, end_loc);
break;
}
default:
c_parser_error (parser, "expected expression");
expr.set_error ();
break;
}
break;
case CPP_OPEN_PAREN:
/* A parenthesized expression, statement expression or compound
literal. */
if (c_parser_peek_2nd_token (parser)->type == CPP_OPEN_BRACE)
{
/* A statement expression. */
tree stmt;
location_t brace_loc;
c_parser_consume_token (parser);
brace_loc = c_parser_peek_token (parser)->location;
c_parser_consume_token (parser);
if (!building_stmt_list_p ())
{
error_at (loc, "braced-group within expression allowed "
"only inside a function");
parser->error = true;
c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
expr.set_error ();
break;
}
stmt = c_begin_stmt_expr ();
c_parser_compound_statement_nostart (parser);
location_t close_loc = c_parser_peek_token (parser)->location;
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids braced-groups within expressions");
expr.value = c_finish_stmt_expr (brace_loc, stmt);
set_c_expr_source_range (&expr, loc, close_loc);
mark_exp_read (expr.value);
}
else
{
/* A parenthesized expression. */
location_t loc_open_paren = c_parser_peek_token (parser)->location;
c_parser_consume_token (parser);
expr = c_parser_expression (parser);
if (TREE_CODE (expr.value) == MODIFY_EXPR)
TREE_NO_WARNING (expr.value) = 1;
if (expr.original_code != C_MAYBE_CONST_EXPR
&& expr.original_code != SIZEOF_EXPR)
expr.original_code = ERROR_MARK;
/* Don't change EXPR.ORIGINAL_TYPE. */
location_t loc_close_paren = c_parser_peek_token (parser)->location;
set_c_expr_source_range (&expr, loc_open_paren, loc_close_paren);
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>", loc_open_paren);
}
break;
case CPP_KEYWORD:
switch (c_parser_peek_token (parser)->keyword)
{
case RID_FUNCTION_NAME:
pedwarn (loc, OPT_Wpedantic, "ISO C does not support "
"%<__FUNCTION__%> predefined identifier");
expr.value = fname_decl (loc,
c_parser_peek_token (parser)->keyword,
c_parser_peek_token (parser)->value);
set_c_expr_source_range (&expr, loc, loc);
c_parser_consume_token (parser);
break;
case RID_PRETTY_FUNCTION_NAME:
pedwarn (loc, OPT_Wpedantic, "ISO C does not support "
"%<__PRETTY_FUNCTION__%> predefined identifier");
expr.value = fname_decl (loc,
c_parser_peek_token (parser)->keyword,
c_parser_peek_token (parser)->value);
set_c_expr_source_range (&expr, loc, loc);
c_parser_consume_token (parser);
break;
case RID_C99_FUNCTION_NAME:
pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support "
"%<__func__%> predefined identifier");
expr.value = fname_decl (loc,
c_parser_peek_token (parser)->keyword,
c_parser_peek_token (parser)->value);
set_c_expr_source_range (&expr, loc, loc);
c_parser_consume_token (parser);
break;
case RID_VA_ARG:
{
location_t start_loc = loc;
c_parser_consume_token (parser);
matching_parens parens;
if (!parens.require_open (parser))
{
expr.set_error ();
break;
}
e1 = c_parser_expr_no_commas (parser, NULL);
mark_exp_read (e1.value);
e1.value = c_fully_fold (e1.value, false, NULL);
if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
expr.set_error ();
break;
}
loc = c_parser_peek_token (parser)->location;
t1 = c_parser_type_name (parser);
location_t end_loc = c_parser_peek_token (parser)->get_finish ();
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
if (t1 == NULL)
{
expr.set_error ();
}
else
{
tree type_expr = NULL_TREE;
expr.value = c_build_va_arg (start_loc, e1.value, loc,
groktypename (t1, &type_expr, NULL));
if (type_expr)
{
expr.value = build2 (C_MAYBE_CONST_EXPR,
TREE_TYPE (expr.value), type_expr,
expr.value);
C_MAYBE_CONST_EXPR_NON_CONST (expr.value) = true;
}
set_c_expr_source_range (&expr, start_loc, end_loc);
}
}
break;
case RID_OFFSETOF:
{
c_parser_consume_token (parser);
matching_parens parens;
if (!parens.require_open (parser))
{
expr.set_error ();
break;
}
t1 = c_parser_type_name (parser);
if (t1 == NULL)
parser->error = true;
if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
gcc_assert (parser->error);
if (parser->error)
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
expr.set_error ();
break;
}
tree type = groktypename (t1, NULL, NULL);
tree offsetof_ref;
if (type == error_mark_node)
offsetof_ref = error_mark_node;
else
{
offsetof_ref = build1 (INDIRECT_REF, type, null_pointer_node);
SET_EXPR_LOCATION (offsetof_ref, loc);
}
/* Parse the second argument to __builtin_offsetof. We
must have one identifier, and beyond that we want to
accept sub structure and sub array references. */
if (c_parser_next_token_is (parser, CPP_NAME))
{
c_token *comp_tok = c_parser_peek_token (parser);
offsetof_ref = build_component_ref
(loc, offsetof_ref, comp_tok->value, comp_tok->location);
c_parser_consume_token (parser);
while (c_parser_next_token_is (parser, CPP_DOT)
|| c_parser_next_token_is (parser,
CPP_OPEN_SQUARE)
|| c_parser_next_token_is (parser,
CPP_DEREF))
{
if (c_parser_next_token_is (parser, CPP_DEREF))
{
loc = c_parser_peek_token (parser)->location;
offsetof_ref = build_array_ref (loc,
offsetof_ref,
integer_zero_node);
goto do_dot;
}
else if (c_parser_next_token_is (parser, CPP_DOT))
{
do_dot:
c_parser_consume_token (parser);
if (c_parser_next_token_is_not (parser,
CPP_NAME))
{
c_parser_error (parser, "expected identifier");
break;
}
c_token *comp_tok = c_parser_peek_token (parser);
offsetof_ref = build_component_ref
(loc, offsetof_ref, comp_tok->value,
comp_tok->location);
c_parser_consume_token (parser);
}
else
{
struct c_expr ce;
tree idx;
loc = c_parser_peek_token (parser)->location;
c_parser_consume_token (parser);
ce = c_parser_expression (parser);
ce = convert_lvalue_to_rvalue (loc, ce, false, false);
idx = ce.value;
idx = c_fully_fold (idx, false, NULL);
c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
"expected %<]%>");
offsetof_ref = build_array_ref (loc, offsetof_ref, idx);
}
}
}
else
c_parser_error (parser, "expected identifier");
location_t end_loc = c_parser_peek_token (parser)->get_finish ();
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
expr.value = fold_offsetof (offsetof_ref);
set_c_expr_source_range (&expr, loc, end_loc);
}
break;
case RID_CHOOSE_EXPR:
{
vec<c_expr_t, va_gc> *cexpr_list;
c_expr_t *e1_p, *e2_p, *e3_p;
tree c;
location_t close_paren_loc;
c_parser_consume_token (parser);
if (!c_parser_get_builtin_args (parser,
"__builtin_choose_expr",
&cexpr_list, true,
&close_paren_loc))
{
expr.set_error ();
break;
}
if (vec_safe_length (cexpr_list) != 3)
{
error_at (loc, "wrong number of arguments to "
"%<__builtin_choose_expr%>");
expr.set_error ();
break;
}
e1_p = &(*cexpr_list)[0];
e2_p = &(*cexpr_list)[1];
e3_p = &(*cexpr_list)[2];
c = e1_p->value;
mark_exp_read (e2_p->value);
mark_exp_read (e3_p->value);
if (TREE_CODE (c) != INTEGER_CST
|| !INTEGRAL_TYPE_P (TREE_TYPE (c)))
error_at (loc,
"first argument to %<__builtin_choose_expr%> not"
" a constant");
constant_expression_warning (c);
expr = integer_zerop (c) ? *e3_p : *e2_p;
set_c_expr_source_range (&expr, loc, close_paren_loc);
break;
}
case RID_TYPES_COMPATIBLE_P:
{
c_parser_consume_token (parser);
matching_parens parens;
if (!parens.require_open (parser))
{
expr.set_error ();
break;
}
t1 = c_parser_type_name (parser);
if (t1 == NULL)
{
expr.set_error ();
break;
}
if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
expr.set_error ();
break;
}
t2 = c_parser_type_name (parser);
if (t2 == NULL)
{
expr.set_error ();
break;
}
location_t close_paren_loc = c_parser_peek_token (parser)->location;
parens.skip_until_found_close (parser);
tree e1, e2;
e1 = groktypename (t1, NULL, NULL);
e2 = groktypename (t2, NULL, NULL);
if (e1 == error_mark_node || e2 == error_mark_node)
{
expr.set_error ();
break;
}
e1 = TYPE_MAIN_VARIANT (e1);
e2 = TYPE_MAIN_VARIANT (e2);
expr.value
= comptypes (e1, e2) ? integer_one_node : integer_zero_node;
set_c_expr_source_range (&expr, loc, close_paren_loc);
}
break;
case RID_BUILTIN_TGMATH:
{
vec<c_expr_t, va_gc> *cexpr_list;
location_t close_paren_loc;
c_parser_consume_token (parser);
if (!c_parser_get_builtin_args (parser,
"__builtin_tgmath",
&cexpr_list, false,
&close_paren_loc))
{
expr.set_error ();
break;
}
if (vec_safe_length (cexpr_list) < 3)
{
error_at (loc, "too few arguments to %<__builtin_tgmath%>");
expr.set_error ();
break;
}
unsigned int i;
c_expr_t *p;
FOR_EACH_VEC_ELT (*cexpr_list, i, p)
*p = convert_lvalue_to_rvalue (loc, *p, true, true);
unsigned int nargs = check_tgmath_function (&(*cexpr_list)[0], 1);
if (nargs == 0)
{
expr.set_error ();
break;
}
if (vec_safe_length (cexpr_list) < nargs)
{
error_at (loc, "too few arguments to %<__builtin_tgmath%>");
expr.set_error ();
break;
}
unsigned int num_functions = vec_safe_length (cexpr_list) - nargs;
if (num_functions < 2)
{
error_at (loc, "too few arguments to %<__builtin_tgmath%>");
expr.set_error ();
break;
}
/* The first NUM_FUNCTIONS expressions are the function
pointers. The remaining NARGS expressions are the
arguments that are to be passed to one of those
functions, chosen following <tgmath.h> rules. */
for (unsigned int j = 1; j < num_functions; j++)
{
unsigned int this_nargs
= check_tgmath_function (&(*cexpr_list)[j], j + 1);
if (this_nargs == 0)
{
expr.set_error ();
goto out;
}
if (this_nargs != nargs)
{
error_at ((*cexpr_list)[j].get_location (),
"argument %u of %<__builtin_tgmath%> has "
"wrong number of arguments", j + 1);
expr.set_error ();
goto out;
}
}
/* The functions all have the same number of arguments.
Determine whether arguments and return types vary in
ways permitted for <tgmath.h> functions. */
/* The first entry in each of these vectors is for the
return type, subsequent entries for parameter
types. */
auto_vec<enum tgmath_parm_kind> parm_kind (nargs + 1);
auto_vec<tree> parm_first (nargs + 1);
auto_vec<bool> parm_complex (nargs + 1);
auto_vec<bool> parm_varies (nargs + 1);
tree first_type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[0].value));
tree first_ret = TYPE_MAIN_VARIANT (TREE_TYPE (first_type));
parm_first.quick_push (first_ret);
parm_complex.quick_push (TREE_CODE (first_ret) == COMPLEX_TYPE);
parm_varies.quick_push (false);
function_args_iterator iter;
tree t;
unsigned int argpos;
FOREACH_FUNCTION_ARGS (first_type, t, iter)
{
if (t == void_type_node)
break;
parm_first.quick_push (TYPE_MAIN_VARIANT (t));
parm_complex.quick_push (TREE_CODE (t) == COMPLEX_TYPE);
parm_varies.quick_push (false);
}
for (unsigned int j = 1; j < num_functions; j++)
{
tree type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[j].value));
tree ret = TYPE_MAIN_VARIANT (TREE_TYPE (type));
if (ret != parm_first[0])
{
parm_varies[0] = true;
if (!SCALAR_FLOAT_TYPE_P (parm_first[0])
&& !COMPLEX_FLOAT_TYPE_P (parm_first[0]))
{
error_at ((*cexpr_list)[0].get_location (),
"invalid type-generic return type for "
"argument %u of %<__builtin_tgmath%>",
1);
expr.set_error ();
goto out;
}
if (!SCALAR_FLOAT_TYPE_P (ret)
&& !COMPLEX_FLOAT_TYPE_P (ret))
{
error_at ((*cexpr_list)[j].get_location (),
"invalid type-generic return type for "
"argument %u of %<__builtin_tgmath%>",
j + 1);
expr.set_error ();
goto out;
}
}
if (TREE_CODE (ret) == COMPLEX_TYPE)
parm_complex[0] = true;
argpos = 1;
FOREACH_FUNCTION_ARGS (type, t, iter)
{
if (t == void_type_node)
break;
t = TYPE_MAIN_VARIANT (t);
if (t != parm_first[argpos])
{
parm_varies[argpos] = true;
if (!SCALAR_FLOAT_TYPE_P (parm_first[argpos])
&& !COMPLEX_FLOAT_TYPE_P (parm_first[argpos]))
{
error_at ((*cexpr_list)[0].get_location (),
"invalid type-generic type for "
"argument %u of argument %u of "
"%<__builtin_tgmath%>", argpos, 1);
expr.set_error ();
goto out;
}
if (!SCALAR_FLOAT_TYPE_P (t)
&& !COMPLEX_FLOAT_TYPE_P (t))
{
error_at ((*cexpr_list)[j].get_location (),
"invalid type-generic type for "
"argument %u of argument %u of "
"%<__builtin_tgmath%>", argpos, j + 1);
expr.set_error ();
goto out;
}
}
if (TREE_CODE (t) == COMPLEX_TYPE)
parm_complex[argpos] = true;
argpos++;
}
}
enum tgmath_parm_kind max_variation = tgmath_fixed;
for (unsigned int j = 0; j <= nargs; j++)
{
enum tgmath_parm_kind this_kind;
if (parm_varies[j])
{
if (parm_complex[j])
max_variation = this_kind = tgmath_complex;
else
{
this_kind = tgmath_real;
if (max_variation != tgmath_complex)
max_variation = tgmath_real;
}
}
else
this_kind = tgmath_fixed;
parm_kind.quick_push (this_kind);
}
if (max_variation == tgmath_fixed)
{
error_at (loc, "function arguments of %<__builtin_tgmath%> "
"all have the same type");
expr.set_error ();
break;
}
/* Identify a parameter (not the return type) that varies,
including with complex types if any variation includes
complex types; there must be at least one such
parameter. */
unsigned int tgarg = 0;
for (unsigned int j = 1; j <= nargs; j++)
if (parm_kind[j] == max_variation)
{
tgarg = j;
break;
}
if (tgarg == 0)
{
error_at (loc, "function arguments of %<__builtin_tgmath%> "
"lack type-generic parameter");
expr.set_error ();
break;
}
/* Determine the type of the relevant parameter for each
function. */
auto_vec<tree> tg_type (num_functions);
for (unsigned int j = 0; j < num_functions; j++)
{
tree type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[j].value));
argpos = 1;
FOREACH_FUNCTION_ARGS (type, t, iter)
{
if (argpos == tgarg)
{
tg_type.quick_push (TYPE_MAIN_VARIANT (t));
break;
}
argpos++;
}
}
/* Verify that the corresponding types are different for
all the listed functions. Also determine whether all
the types are complex, whether all the types are
standard or binary, and whether all the types are
decimal. */
bool all_complex = true;
bool all_binary = true;
bool all_decimal = true;
hash_set<tree> tg_types;
FOR_EACH_VEC_ELT (tg_type, i, t)
{
if (TREE_CODE (t) == COMPLEX_TYPE)
all_decimal = false;
else
{
all_complex = false;
if (DECIMAL_FLOAT_TYPE_P (t))
all_binary = false;
else
all_decimal = false;
}
if (tg_types.add (t))
{
error_at ((*cexpr_list)[i].get_location (),
"duplicate type-generic parameter type for "
"function argument %u of %<__builtin_tgmath%>",
i + 1);
expr.set_error ();
goto out;
}
}
/* Verify that other parameters and the return type whose
types vary have their types varying in the correct
way. */
for (unsigned int j = 0; j < num_functions; j++)
{
tree exp_type = tg_type[j];
tree exp_real_type = exp_type;
if (TREE_CODE (exp_type) == COMPLEX_TYPE)
exp_real_type = TREE_TYPE (exp_type);
tree type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[j].value));
tree ret = TYPE_MAIN_VARIANT (TREE_TYPE (type));
if ((parm_kind[0] == tgmath_complex && ret != exp_type)
|| (parm_kind[0] == tgmath_real && ret != exp_real_type))
{
error_at ((*cexpr_list)[j].get_location (),
"bad return type for function argument %u "
"of %<__builtin_tgmath%>", j + 1);
expr.set_error ();
goto out;
}
argpos = 1;
FOREACH_FUNCTION_ARGS (type, t, iter)
{
if (t == void_type_node)
break;
t = TYPE_MAIN_VARIANT (t);
if ((parm_kind[argpos] == tgmath_complex
&& t != exp_type)
|| (parm_kind[argpos] == tgmath_real
&& t != exp_real_type))
{
error_at ((*cexpr_list)[j].get_location (),
"bad type for argument %u of "
"function argument %u of "
"%<__builtin_tgmath%>", argpos, j + 1);
expr.set_error ();
goto out;
}
argpos++;
}
}
/* The functions listed are a valid set of functions for a
<tgmath.h> macro to select between. Identify the
matching function, if any. First, the argument types
must be combined following <tgmath.h> rules. Integer
types are treated as _Decimal64 if any type-generic
argument is decimal, or if the only alternatives for
type-generic arguments are of decimal types, and are
otherwise treated as double (or _Complex double for
complex integer types, or _Float64 or _Complex _Float64
if all the return types are the same _FloatN or
_FloatNx type). After that adjustment, types are
combined following the usual arithmetic conversions.
If the function only accepts complex arguments, a
complex type is produced. */
bool arg_complex = all_complex;
bool arg_binary = all_binary;
bool arg_int_decimal = all_decimal;
for (unsigned int j = 1; j <= nargs; j++)
{
if (parm_kind[j] == tgmath_fixed)
continue;
c_expr_t *ce = &(*cexpr_list)[num_functions + j - 1];
tree type = TREE_TYPE (ce->value);
if (!INTEGRAL_TYPE_P (type)
&& !SCALAR_FLOAT_TYPE_P (type)
&& TREE_CODE (type) != COMPLEX_TYPE)
{
error_at (ce->get_location (),
"invalid type of argument %u of type-generic "
"function", j);
expr.set_error ();
goto out;
}
if (DECIMAL_FLOAT_TYPE_P (type))
{
arg_int_decimal = true;
if (all_complex)
{
error_at (ce->get_location (),
"decimal floating-point argument %u to "
"complex-only type-generic function", j);
expr.set_error ();
goto out;
}
else if (all_binary)
{
error_at (ce->get_location (),
"decimal floating-point argument %u to "
"binary-only type-generic function", j);
expr.set_error ();
goto out;
}
else if (arg_complex)
{
error_at (ce->get_location (),
"both complex and decimal floating-point "
"arguments to type-generic function");
expr.set_error ();
goto out;
}
else if (arg_binary)
{
error_at (ce->get_location (),
"both binary and decimal floating-point "
"arguments to type-generic function");
expr.set_error ();
goto out;
}
}
else if (TREE_CODE (type) == COMPLEX_TYPE)
{
arg_complex = true;
if (COMPLEX_FLOAT_TYPE_P (type))
arg_binary = true;
if (all_decimal)
{
error_at (ce->get_location (),
"complex argument %u to "
"decimal-only type-generic function", j);
expr.set_error ();
goto out;
}
else if (arg_int_decimal)
{
error_at (ce->get_location (),
"both complex and decimal floating-point "
"arguments to type-generic function");
expr.set_error ();
goto out;
}
}
else if (SCALAR_FLOAT_TYPE_P (type))
{
arg_binary = true;
if (all_decimal)
{
error_at (ce->get_location (),
"binary argument %u to "
"decimal-only type-generic function", j);
expr.set_error ();
goto out;
}
else if (arg_int_decimal)
{
error_at (ce->get_location (),
"both binary and decimal floating-point "
"arguments to type-generic function");
expr.set_error ();
goto out;
}
}
}
/* For a macro rounding its result to a narrower type, map
integer types to _Float64 not double if the return type
is a _FloatN or _FloatNx type. */
bool arg_int_float64 = false;
if (parm_kind[0] == tgmath_fixed
&& SCALAR_FLOAT_TYPE_P (parm_first[0])
&& float64_type_node != NULL_TREE)
for (unsigned int j = 0; j < NUM_FLOATN_NX_TYPES; j++)
if (parm_first[0] == FLOATN_TYPE_NODE (j))
{
arg_int_float64 = true;
break;
}
tree arg_real = NULL_TREE;
for (unsigned int j = 1; j <= nargs; j++)
{
if (parm_kind[j] == tgmath_fixed)
continue;
c_expr_t *ce = &(*cexpr_list)[num_functions + j - 1];
tree type = TYPE_MAIN_VARIANT (TREE_TYPE (ce->value));
if (TREE_CODE (type) == COMPLEX_TYPE)
type = TREE_TYPE (type);
if (INTEGRAL_TYPE_P (type))
type = (arg_int_decimal
? dfloat64_type_node
: arg_int_float64
? float64_type_node
: double_type_node);
if (arg_real == NULL_TREE)
arg_real = type;
else
arg_real = common_type (arg_real, type);
if (arg_real == error_mark_node)
{
expr.set_error ();
goto out;
}
}
tree arg_type = (arg_complex
? build_complex_type (arg_real)
: arg_real);
/* Look for a function to call with type-generic parameter
type ARG_TYPE. */
c_expr_t *fn = NULL;
for (unsigned int j = 0; j < num_functions; j++)
{
if (tg_type[j] == arg_type)
{
fn = &(*cexpr_list)[j];
break;
}
}
if (fn == NULL
&& parm_kind[0] == tgmath_fixed
&& SCALAR_FLOAT_TYPE_P (parm_first[0]))
{
/* Presume this is a macro that rounds its result to a
narrower type, and look for the first function with
at least the range and precision of the argument
type. */
for (unsigned int j = 0; j < num_functions; j++)
{
if (arg_complex
!= (TREE_CODE (tg_type[j]) == COMPLEX_TYPE))
continue;
tree real_tg_type = (arg_complex
? TREE_TYPE (tg_type[j])
: tg_type[j]);
if (DECIMAL_FLOAT_TYPE_P (arg_real)
!= DECIMAL_FLOAT_TYPE_P (real_tg_type))
continue;
scalar_float_mode arg_mode
= SCALAR_FLOAT_TYPE_MODE (arg_real);
scalar_float_mode tg_mode
= SCALAR_FLOAT_TYPE_MODE (real_tg_type);
const real_format *arg_fmt = REAL_MODE_FORMAT (arg_mode);
const real_format *tg_fmt = REAL_MODE_FORMAT (tg_mode);
if (arg_fmt->b == tg_fmt->b
&& arg_fmt->p <= tg_fmt->p
&& arg_fmt->emax <= tg_fmt->emax
&& (arg_fmt->emin - arg_fmt->p
>= tg_fmt->emin - tg_fmt->p))
{
fn = &(*cexpr_list)[j];
break;
}
}
}
if (fn == NULL)
{
error_at (loc, "no matching function for type-generic call");
expr.set_error ();
break;
}
/* Construct a call to FN. */
vec<tree, va_gc> *args;
vec_alloc (args, nargs);
vec<tree, va_gc> *origtypes;
vec_alloc (origtypes, nargs);
auto_vec<location_t> arg_loc (nargs);
for (unsigned int j = 0; j < nargs; j++)
{
c_expr_t *ce = &(*cexpr_list)[num_functions + j];
args->quick_push (ce->value);
arg_loc.quick_push (ce->get_location ());
origtypes->quick_push (ce->original_type);
}
expr.value = c_build_function_call_vec (loc, arg_loc, fn->value,
args, origtypes);
set_c_expr_source_range (&expr, loc, close_paren_loc);
break;
}
case RID_BUILTIN_CALL_WITH_STATIC_CHAIN:
{
vec<c_expr_t, va_gc> *cexpr_list;
c_expr_t *e2_p;
tree chain_value;
location_t close_paren_loc;
c_parser_consume_token (parser);
if (!c_parser_get_builtin_args (parser,
"__builtin_call_with_static_chain",
&cexpr_list, false,
&close_paren_loc))
{
expr.set_error ();
break;
}
if (vec_safe_length (cexpr_list) != 2)
{
error_at (loc, "wrong number of arguments to "
"%<__builtin_call_with_static_chain%>");
expr.set_error ();
break;
}
expr = (*cexpr_list)[0];
e2_p = &(*cexpr_list)[1];
*e2_p = convert_lvalue_to_rvalue (loc, *e2_p, true, true);
chain_value = e2_p->value;
mark_exp_read (chain_value);
if (TREE_CODE (expr.value) != CALL_EXPR)
error_at (loc, "first argument to "
"%<__builtin_call_with_static_chain%> "
"must be a call expression");
else if (TREE_CODE (TREE_TYPE (chain_value)) != POINTER_TYPE)
error_at (loc, "second argument to "
"%<__builtin_call_with_static_chain%> "
"must be a pointer type");
else
CALL_EXPR_STATIC_CHAIN (expr.value) = chain_value;
set_c_expr_source_range (&expr, loc, close_paren_loc);
break;
}
case RID_BUILTIN_COMPLEX:
{
vec<c_expr_t, va_gc> *cexpr_list;
c_expr_t *e1_p, *e2_p;
location_t close_paren_loc;
c_parser_consume_token (parser);
if (!c_parser_get_builtin_args (parser,
"__builtin_complex",
&cexpr_list, false,
&close_paren_loc))
{
expr.set_error ();
break;
}
if (vec_safe_length (cexpr_list) != 2)
{
error_at (loc, "wrong number of arguments to "
"%<__builtin_complex%>");
expr.set_error ();
break;
}
e1_p = &(*cexpr_list)[0];
e2_p = &(*cexpr_list)[1];
*e1_p = convert_lvalue_to_rvalue (loc, *e1_p, true, true);
if (TREE_CODE (e1_p->value) == EXCESS_PRECISION_EXPR)
e1_p->value = convert (TREE_TYPE (e1_p->value),
TREE_OPERAND (e1_p->value, 0));
*e2_p = convert_lvalue_to_rvalue (loc, *e2_p, true, true);
if (TREE_CODE (e2_p->value) == EXCESS_PRECISION_EXPR)
e2_p->value = convert (TREE_TYPE (e2_p->value),
TREE_OPERAND (e2_p->value, 0));
if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (e1_p->value))
|| DECIMAL_FLOAT_TYPE_P (TREE_TYPE (e1_p->value))
|| !SCALAR_FLOAT_TYPE_P (TREE_TYPE (e2_p->value))
|| DECIMAL_FLOAT_TYPE_P (TREE_TYPE (e2_p->value)))
{
error_at (loc, "%<__builtin_complex%> operand "
"not of real binary floating-point type");
expr.set_error ();
break;
}
if (TYPE_MAIN_VARIANT (TREE_TYPE (e1_p->value))
!= TYPE_MAIN_VARIANT (TREE_TYPE (e2_p->value)))
{
error_at (loc,
"%<__builtin_complex%> operands of different types");
expr.set_error ();
break;
}
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 does not support complex types");
expr.value = build2_loc (loc, COMPLEX_EXPR,
build_complex_type
(TYPE_MAIN_VARIANT
(TREE_TYPE (e1_p->value))),
e1_p->value, e2_p->value);
set_c_expr_source_range (&expr, loc, close_paren_loc);
break;
}
case RID_BUILTIN_SHUFFLE:
{
vec<c_expr_t, va_gc> *cexpr_list;
unsigned int i;
c_expr_t *p;
location_t close_paren_loc;
c_parser_consume_token (parser);
if (!c_parser_get_builtin_args (parser,
"__builtin_shuffle",
&cexpr_list, false,
&close_paren_loc))
{
expr.set_error ();
break;
}
FOR_EACH_VEC_SAFE_ELT (cexpr_list, i, p)
*p = convert_lvalue_to_rvalue (loc, *p, true, true);
if (vec_safe_length (cexpr_list) == 2)
expr.value =
c_build_vec_perm_expr
(loc, (*cexpr_list)[0].value,
NULL_TREE, (*cexpr_list)[1].value);
else if (vec_safe_length (cexpr_list) == 3)
expr.value =
c_build_vec_perm_expr
(loc, (*cexpr_list)[0].value,
(*cexpr_list)[1].value,
(*cexpr_list)[2].value);
else
{
error_at (loc, "wrong number of arguments to "
"%<__builtin_shuffle%>");
expr.set_error ();
}
set_c_expr_source_range (&expr, loc, close_paren_loc);
break;
}
case RID_AT_SELECTOR:
{
gcc_assert (c_dialect_objc ());
c_parser_consume_token (parser);
matching_parens parens;
if (!parens.require_open (parser))
{
expr.set_error ();
break;
}
tree sel = c_parser_objc_selector_arg (parser);
location_t close_loc = c_parser_peek_token (parser)->location;
parens.skip_until_found_close (parser);
expr.value = objc_build_selector_expr (loc, sel);
set_c_expr_source_range (&expr, loc, close_loc);
}
break;
case RID_AT_PROTOCOL:
{
gcc_assert (c_dialect_objc ());
c_parser_consume_token (parser);
matching_parens parens;
if (!parens.require_open (parser))
{
expr.set_error ();
break;
}
if (c_parser_next_token_is_not (parser, CPP_NAME))
{
c_parser_error (parser, "expected identifier");
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
expr.set_error ();
break;
}
tree id = c_parser_peek_token (parser)->value;
c_parser_consume_token (parser);
location_t close_loc = c_parser_peek_token (parser)->location;
parens.skip_until_found_close (parser);
expr.value = objc_build_protocol_expr (id);
set_c_expr_source_range (&expr, loc, close_loc);
}
break;
case RID_AT_ENCODE:
{
/* Extension to support C-structures in the archiver. */
gcc_assert (c_dialect_objc ());
c_parser_consume_token (parser);
matching_parens parens;
if (!parens.require_open (parser))
{
expr.set_error ();
break;
}
t1 = c_parser_type_name (parser);
if (t1 == NULL)
{
expr.set_error ();
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
break;
}
location_t close_loc = c_parser_peek_token (parser)->location;
parens.skip_until_found_close (parser);
tree type = groktypename (t1, NULL, NULL);
expr.value = objc_build_encode_expr (type);
set_c_expr_source_range (&expr, loc, close_loc);
}
break;
case RID_GENERIC:
expr = c_parser_generic_selection (parser);
break;
default:
c_parser_error (parser, "expected expression");
expr.set_error ();
break;
}
break;
case CPP_OPEN_SQUARE:
if (c_dialect_objc ())
{
tree receiver, args;
c_parser_consume_token (parser);
receiver = c_parser_objc_receiver (parser);
args = c_parser_objc_message_args (parser);
location_t close_loc = c_parser_peek_token (parser)->location;
c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
"expected %<]%>");
expr.value = objc_build_message_expr (receiver, args);
set_c_expr_source_range (&expr, loc, close_loc);
break;
}
/* Else fall through to report error. */
/* FALLTHRU */
default:
c_parser_error (parser, "expected expression");
expr.set_error ();
break;
}
out:
return c_parser_postfix_expression_after_primary
(parser, EXPR_LOC_OR_LOC (expr.value, loc), expr);
}
/* Parse a postfix expression after a parenthesized type name: the
   brace-enclosed initializer of a compound literal, possibly followed
   by some postfix operators.  This is separate because it is not
   possible to tell until after the type name whether a cast
   expression has a cast or a compound literal, or whether the operand
   of sizeof is a parenthesized type name or starts with a compound
   literal.  TYPE_LOC is the location where TYPE_NAME starts--the
   location of the first token after the parentheses around the type
   name.  */

static struct c_expr
c_parser_postfix_expression_after_paren_type (c_parser *parser,
					      struct c_type_name *type_name,
					      location_t type_loc)
{
  tree type;
  struct c_expr init;
  bool non_const;
  struct c_expr expr;
  location_t start_loc;
  tree type_expr = NULL_TREE;
  bool type_expr_const = true;
  check_compound_literal_type (type_loc, type_name);
  rich_location richloc (line_table, type_loc);
  /* NOTE(review): start_init is called before groktypename; this
     ordering appears deliberate (initializer machinery must be active
     while the type's side effects are captured into TYPE_EXPR) --
     confirm before reordering.  */
  start_init (NULL_TREE, NULL, 0, &richloc);
  type = groktypename (type_name, &type_expr, &type_expr_const);
  start_loc = c_parser_peek_token (parser)->location;
  if (type != error_mark_node && C_TYPE_VARIABLE_SIZE (type))
    {
      error_at (type_loc, "compound literal has variable size");
      type = error_mark_node;
    }
  init = c_parser_braced_init (parser, type, false, NULL);
  finish_init ();
  maybe_warn_string_init (type_loc, type, init);

  if (type != error_mark_node
      && !ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (type))
      && current_function_decl)
    {
      error ("compound literal qualified by address-space qualifier");
      type = error_mark_node;
    }

  pedwarn_c90 (start_loc, OPT_Wpedantic, "ISO C90 forbids compound literals");
  /* The literal is non-constant if its constructor was marked
     non-constant, or if evaluating the type itself had side effects
     (e.g. a VLA typedef's size expression).  */
  non_const = ((init.value && TREE_CODE (init.value) == CONSTRUCTOR)
	       ? CONSTRUCTOR_NON_CONST (init.value)
	       : init.original_code == C_MAYBE_CONST_EXPR);
  non_const |= !type_expr_const;
  unsigned int alignas_align = 0;
  if (type != error_mark_node
      && type_name->specs->align_log != -1)
    {
      /* _Alignas may only increase alignment relative to the type's
	 natural minimum.  */
      alignas_align = 1U << type_name->specs->align_log;
      if (alignas_align < min_align_of_type (type))
	{
	  error_at (type_name->specs->locations[cdw_alignas],
		    "%<_Alignas%> specifiers cannot reduce "
		    "alignment of compound literal");
	  alignas_align = 0;
	}
    }
  expr.value = build_compound_literal (start_loc, type, init.value, non_const,
				       alignas_align);
  set_c_expr_source_range (&expr, init.src_range);
  expr.original_code = ERROR_MARK;
  expr.original_type = NULL;
  if (type != error_mark_node
      && expr.value != error_mark_node
      && type_expr)
    {
      /* Attach the type's side effects so they are evaluated before
	 the literal itself.  */
      if (TREE_CODE (expr.value) == C_MAYBE_CONST_EXPR)
	{
	  gcc_assert (C_MAYBE_CONST_EXPR_PRE (expr.value) == NULL_TREE);
	  C_MAYBE_CONST_EXPR_PRE (expr.value) = type_expr;
	}
      else
	{
	  gcc_assert (!non_const);
	  expr.value = build2 (C_MAYBE_CONST_EXPR, type,
			       type_expr, expr.value);
	}
    }
  return c_parser_postfix_expression_after_primary (parser, start_loc, expr);
}
/* Type-comparison callback handed to sizeof_pointer_memaccess_warning:
   report whether TYPE1 and TYPE2 are compatible.  */

static bool
sizeof_ptr_memacc_comptypes (tree type1, tree type2)
{
  /* comptypes returns 1 for plain compatibility; treat anything else
     (including "compatible only via extensions") as a mismatch.  */
  int compat = comptypes (type1, type2);
  if (compat == 1)
    return true;
  return false;
}
/* Parse a postfix expression after the initial primary or compound
   literal; that is, parse a series of postfix operators (array
   subscripts, calls, member accesses, post-increment/decrement).
   EXPR_LOC is the location of the primary expression.  Loops until a
   token that cannot continue a postfix expression is seen, then
   returns the accumulated expression.  */

static struct c_expr
c_parser_postfix_expression_after_primary (c_parser *parser,
					   location_t expr_loc,
					   struct c_expr expr)
{
  struct c_expr orig_expr;
  tree ident, idx;
  location_t sizeof_arg_loc[3], comp_loc;
  tree sizeof_arg[3];
  unsigned int literal_zero_mask;
  unsigned int i;
  vec<tree, va_gc> *exprlist;
  vec<tree, va_gc> *origtypes = NULL;
  vec<location_t> arg_loc = vNULL;
  location_t start;
  location_t finish;

  while (true)
    {
      location_t op_loc = c_parser_peek_token (parser)->location;
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_OPEN_SQUARE:
	  /* Array reference.  */
	  c_parser_consume_token (parser);
	  idx = c_parser_expression (parser).value;
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  start = expr.get_start ();
	  /* tokens_buf[0] is the token just consumed (the ']'); its
	     location marks the end of the reference.  */
	  finish = parser->tokens_buf[0].location;
	  expr.value = build_array_ref (op_loc, expr.value, idx);
	  set_c_expr_source_range (&expr, start, finish);
	  expr.original_code = ERROR_MARK;
	  expr.original_type = NULL;
	  break;
	case CPP_OPEN_PAREN:
	  /* Function call.  */
	  c_parser_consume_token (parser);
	  /* Track up to three sizeof arguments for
	     -Wsizeof-pointer-memaccess, and literal zeros for
	     -Wmemset-transposed-args.  */
	  for (i = 0; i < 3; i++)
	    {
	      sizeof_arg[i] = NULL_TREE;
	      sizeof_arg_loc[i] = UNKNOWN_LOCATION;
	    }
	  literal_zero_mask = 0;
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    exprlist = NULL;
	  else
	    exprlist = c_parser_expr_list (parser, true, false, &origtypes,
					   sizeof_arg_loc, sizeof_arg,
					   &arg_loc, &literal_zero_mask);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  orig_expr = expr;
	  mark_exp_read (expr.value);
	  if (warn_sizeof_pointer_memaccess)
	    sizeof_pointer_memaccess_warning (sizeof_arg_loc,
					      expr.value, exprlist,
					      sizeof_arg,
					      sizeof_ptr_memacc_comptypes);
	  /* Special-case memset (arg0, 0, n) style mistakes.  */
	  if (TREE_CODE (expr.value) == FUNCTION_DECL
	      && DECL_BUILT_IN_CLASS (expr.value) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (expr.value) == BUILT_IN_MEMSET
	      && vec_safe_length (exprlist) == 3)
	    {
	      tree arg0 = (*exprlist)[0];
	      tree arg2 = (*exprlist)[2];
	      warn_for_memset (expr_loc, arg0, arg2, literal_zero_mask);
	    }

	  start = expr.get_start ();
	  finish = parser->tokens_buf[0].get_finish ();
	  expr.value
	    = c_build_function_call_vec (expr_loc, arg_loc, expr.value,
					 exprlist, origtypes);
	  set_c_expr_source_range (&expr, start, finish);
	  expr.original_code = ERROR_MARK;
	  /* A __builtin_constant_p call that folded to a constant is
	     marked so enclosing code can treat it as maybe-constant.  */
	  if (TREE_CODE (expr.value) == INTEGER_CST
	      && TREE_CODE (orig_expr.value) == FUNCTION_DECL
	      && DECL_BUILT_IN_CLASS (orig_expr.value) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (orig_expr.value) == BUILT_IN_CONSTANT_P)
	    expr.original_code = C_MAYBE_CONST_EXPR;
	  expr.original_type = NULL;
	  if (exprlist)
	    {
	      release_tree_vector (exprlist);
	      release_tree_vector (origtypes);
	    }
	  arg_loc.release ();
	  break;
	case CPP_DOT:
	  /* Structure element reference.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr_loc, expr);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      c_token *comp_tok = c_parser_peek_token (parser);
	      ident = comp_tok->value;
	      comp_loc = comp_tok->location;
	    }
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      expr.set_error ();
	      expr.original_code = ERROR_MARK;
	      expr.original_type = NULL;
	      return expr;
	    }
	  start = expr.get_start ();
	  finish = c_parser_peek_token (parser)->get_finish ();
	  c_parser_consume_token (parser);
	  expr.value = build_component_ref (op_loc, expr.value, ident,
					    comp_loc);
	  set_c_expr_source_range (&expr, start, finish);
	  expr.original_code = ERROR_MARK;
	  if (TREE_CODE (expr.value) != COMPONENT_REF)
	    expr.original_type = NULL;
	  else
	    {
	      /* Remember the original type of a bitfield.  */
	      tree field = TREE_OPERAND (expr.value, 1);
	      if (TREE_CODE (field) != FIELD_DECL)
		expr.original_type = NULL;
	      else
		expr.original_type = DECL_BIT_FIELD_TYPE (field);
	    }
	  break;
	case CPP_DEREF:
	  /* Structure element reference via "->".  */
	  c_parser_consume_token (parser);
	  expr = convert_lvalue_to_rvalue (expr_loc, expr, true, false);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      c_token *comp_tok = c_parser_peek_token (parser);
	      ident = comp_tok->value;
	      comp_loc = comp_tok->location;
	    }
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      expr.set_error ();
	      expr.original_code = ERROR_MARK;
	      expr.original_type = NULL;
	      return expr;
	    }
	  start = expr.get_start ();
	  finish = c_parser_peek_token (parser)->get_finish ();
	  c_parser_consume_token (parser);
	  /* a->b is handled as (*a).b.  */
	  expr.value = build_component_ref (op_loc,
					    build_indirect_ref (op_loc,
								expr.value,
								RO_ARROW),
					    ident, comp_loc);
	  set_c_expr_source_range (&expr, start, finish);
	  expr.original_code = ERROR_MARK;
	  if (TREE_CODE (expr.value) != COMPONENT_REF)
	    expr.original_type = NULL;
	  else
	    {
	      /* Remember the original type of a bitfield.  */
	      tree field = TREE_OPERAND (expr.value, 1);
	      if (TREE_CODE (field) != FIELD_DECL)
		expr.original_type = NULL;
	      else
		expr.original_type = DECL_BIT_FIELD_TYPE (field);
	    }
	  break;
	case CPP_PLUS_PLUS:
	  /* Postincrement.  */
	  start = expr.get_start ();
	  finish = c_parser_peek_token (parser)->get_finish ();
	  c_parser_consume_token (parser);
	  expr = default_function_array_read_conversion (expr_loc, expr);
	  expr.value = build_unary_op (op_loc, POSTINCREMENT_EXPR,
				       expr.value, false);
	  set_c_expr_source_range (&expr, start, finish);
	  expr.original_code = ERROR_MARK;
	  expr.original_type = NULL;
	  break;
	case CPP_MINUS_MINUS:
	  /* Postdecrement.  */
	  start = expr.get_start ();
	  finish = c_parser_peek_token (parser)->get_finish ();
	  c_parser_consume_token (parser);
	  expr = default_function_array_read_conversion (expr_loc, expr);
	  expr.value = build_unary_op (op_loc, POSTDECREMENT_EXPR,
				       expr.value, false);
	  set_c_expr_source_range (&expr, start, finish);
	  expr.original_code = ERROR_MARK;
	  expr.original_type = NULL;
	  break;
	default:
	  /* Not a postfix operator: the expression is complete.  */
	  return expr;
	}
    }
}
/* Parse an expression (C90 6.3.17, C99 6.5.17, C11 6.5.17).

   expression:
     assignment-expression
     expression , assignment-expression

   The left-hand sides of commas are converted to rvalues so their
   values are discarded correctly, and are marked as read to avoid
   spurious set-but-not-used warnings.  */

static struct c_expr
c_parser_expression (c_parser *parser)
{
  location_t tloc = c_parser_peek_token (parser)->location;
  struct c_expr expr;
  expr = c_parser_expr_no_commas (parser, NULL);
  /* Only convert the first operand when it is actually followed by a
     comma (i.e. when its value will be discarded).  */
  if (c_parser_next_token_is (parser, CPP_COMMA))
    expr = convert_lvalue_to_rvalue (tloc, expr, true, false);
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      struct c_expr next;
      tree lhsval;
      location_t loc = c_parser_peek_token (parser)->location;
      location_t expr_loc;
      c_parser_consume_token (parser);
      expr_loc = c_parser_peek_token (parser)->location;
      lhsval = expr.value;
      /* Dig through nested COMPOUND_EXPRs to mark the innermost
	 discarded value as read.  */
      while (TREE_CODE (lhsval) == COMPOUND_EXPR)
	lhsval = TREE_OPERAND (lhsval, 1);
      if (DECL_P (lhsval) || handled_component_p (lhsval))
	mark_exp_read (lhsval);
      next = c_parser_expr_no_commas (parser, NULL);
      next = convert_lvalue_to_rvalue (expr_loc, next, true, false);
      expr.value = build_compound_expr (loc, expr.value, next.value);
      expr.original_code = COMPOUND_EXPR;
      /* The result's type information comes from the last operand.  */
      expr.original_type = next.original_type;
    }
  return expr;
}
/* Parse an expression, then convert functions or arrays to pointers
   and lvalues to rvalues.  */

static struct c_expr
c_parser_expression_conv (c_parser *parser)
{
  location_t start_loc = c_parser_peek_token (parser)->location;
  struct c_expr result = c_parser_expression (parser);
  return convert_lvalue_to_rvalue (start_loc, result, true, false);
}
/* Helper function of c_parser_expr_list.  Check if IDXth (0 based)
   argument is a literal zero alone and if so, set it in
   literal_zero_mask.  Only the first HOST_BITS_PER_INT arguments can
   be tracked, since the mask is a plain unsigned int.  */

static inline void
c_parser_check_literal_zero (c_parser *parser, unsigned *literal_zero_mask,
			     unsigned int idx)
{
  if (idx >= HOST_BITS_PER_INT)
    return;
  c_token *tok = c_parser_peek_token (parser);
  switch (tok->type)
    {
    case CPP_NUMBER:
    case CPP_CHAR:
    case CPP_WCHAR:
    case CPP_CHAR16:
    case CPP_CHAR32:
      /* If a parameter is literal zero alone, remember it
	 for -Wmemset-transposed-args warning.  The next token must be
	 ',' or ')' so that the zero is the whole argument.  */
      if (integer_zerop (tok->value)
	  && !TREE_OVERFLOW (tok->value)
	  && (c_parser_peek_2nd_token (parser)->type == CPP_COMMA
	      || c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_PAREN))
	*literal_zero_mask |= 1U << idx;
      /* FALLTHRU */
    default:
      break;
    }
}
/* Parse a non-empty list of expressions.  If CONVERT_P, convert
   functions and arrays to pointers and lvalues to rvalues.  If
   FOLD_P, fold the expressions.  If LOCATIONS is non-NULL, save the
   locations of function arguments into this vector.  If
   SIZEOF_ARG/SIZEOF_ARG_LOC are non-NULL, record the operands and
   locations of up to the first three sizeof expressions for
   -Wsizeof-pointer-memaccess.  If LITERAL_ZERO_MASK is non-NULL, set
   a bit for each argument that is a lone literal zero (for
   -Wmemset-transposed-args).

   nonempty-expr-list:
     assignment-expression
     nonempty-expr-list , assignment-expression
*/

static vec<tree, va_gc> *
c_parser_expr_list (c_parser *parser, bool convert_p, bool fold_p,
		    vec<tree, va_gc> **p_orig_types,
		    location_t *sizeof_arg_loc, tree *sizeof_arg,
		    vec<location_t> *locations,
		    unsigned int *literal_zero_mask)
{
  vec<tree, va_gc> *ret;
  vec<tree, va_gc> *orig_types;
  struct c_expr expr;
  /* IDX counts the arguments already pushed; the literal-zero check
     must run BEFORE each argument is parsed, since it peeks at the
     unconsumed token.  */
  unsigned int idx = 0;

  ret = make_tree_vector ();
  if (p_orig_types == NULL)
    orig_types = NULL;
  else
    orig_types = make_tree_vector ();

  if (literal_zero_mask)
    c_parser_check_literal_zero (parser, literal_zero_mask, 0);
  expr = c_parser_expr_no_commas (parser, NULL);
  if (convert_p)
    expr = convert_lvalue_to_rvalue (expr.get_location (), expr, true, true);
  if (fold_p)
    expr.value = c_fully_fold (expr.value, false, NULL);
  ret->quick_push (expr.value);
  if (orig_types)
    orig_types->quick_push (expr.original_type);
  if (locations)
    locations->safe_push (expr.get_location ());
  if (sizeof_arg != NULL
      && expr.original_code == SIZEOF_EXPR)
    {
      /* c_last_sizeof_arg/_loc are globals recording the most recent
	 sizeof parsed.  */
      sizeof_arg[0] = c_last_sizeof_arg;
      sizeof_arg_loc[0] = c_last_sizeof_loc;
    }
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      c_parser_consume_token (parser);
      if (literal_zero_mask)
	c_parser_check_literal_zero (parser, literal_zero_mask, idx + 1);
      expr = c_parser_expr_no_commas (parser, NULL);
      if (convert_p)
	expr = convert_lvalue_to_rvalue (expr.get_location (), expr, true,
					 true);
      if (fold_p)
	expr.value = c_fully_fold (expr.value, false, NULL);
      vec_safe_push (ret, expr.value);
      if (orig_types)
	vec_safe_push (orig_types, expr.original_type);
      if (locations)
	locations->safe_push (expr.get_location ());
      /* Only the first three arguments are tracked for the sizeof
	 warning (that covers memcpy/memset-style calls).  */
      if (++idx < 3
	  && sizeof_arg != NULL
	  && expr.original_code == SIZEOF_EXPR)
	{
	  sizeof_arg[idx] = c_last_sizeof_arg;
	  sizeof_arg_loc[idx] = c_last_sizeof_loc;
	}
    }
  if (orig_types)
    *p_orig_types = orig_types;
  return ret;
}
/* Parse Objective-C-specific constructs.  */

/* Parse an objc-class-definition.

   objc-class-definition:
     @interface identifier objc-superclass[opt] objc-protocol-refs[opt]
       objc-class-instance-variables[opt] objc-methodprotolist @end
     @implementation identifier objc-superclass[opt]
       objc-class-instance-variables[opt]
     @interface identifier ( identifier ) objc-protocol-refs[opt]
       objc-methodprotolist @end
     @interface identifier ( ) objc-protocol-refs[opt]
       objc-methodprotolist @end
     @implementation identifier ( identifier )

   objc-superclass:
     : identifier

   "@interface identifier (" must start "@interface identifier (
   identifier ) ...": objc-methodprotolist in the first production may
   not start with a parenthesized identifier as a declarator of a data
   definition with no declaration specifiers if the objc-superclass,
   objc-protocol-refs and objc-class-instance-variables are omitted.  */

static void
c_parser_objc_class_definition (c_parser *parser, tree attributes)
{
  bool iface_p;
  tree id1;
  tree superclass;
  if (c_parser_next_token_is_keyword (parser, RID_AT_INTERFACE))
    iface_p = true;
  else if (c_parser_next_token_is_keyword (parser, RID_AT_IMPLEMENTATION))
    iface_p = false;
  else
    gcc_unreachable ();

  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      return;
    }
  id1 = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* We have a category or class extension.  */
      tree id2;
      tree proto = NULL_TREE;
      matching_parens parens;
      parens.consume_open (parser);
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  if (iface_p && c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    {
	      /* We have a class extension: "@interface foo ()".  */
	      id2 = NULL_TREE;
	    }
	  else
	    {
	      c_parser_error (parser, "expected identifier or %<)%>");
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      return;
	    }
	}
      else
	{
	  id2 = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	}
      parens.skip_until_found_close (parser);
      if (!iface_p)
	{
	  /* Category @implementation ends here; its method definitions
	     are parsed as ordinary external definitions.  */
	  objc_start_category_implementation (id1, id2);
	  return;
	}
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      objc_start_category_interface (id1, id2, proto, attributes);
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_finish_interface ();
      return;
    }
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  return;
	}
      superclass = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else
    superclass = NULL_TREE;
  if (iface_p)
    {
      tree proto = NULL_TREE;
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      objc_start_class_interface (id1, superclass, proto, attributes);
    }
  else
    objc_start_class_implementation (id1, superclass);
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    c_parser_objc_class_instance_variables (parser);
  if (iface_p)
    {
      objc_continue_interface ();
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_finish_interface ();
    }
  else
    {
      /* The implementation's contents are parsed as external
	 definitions elsewhere; @end is consumed by the caller.  */
      objc_continue_implementation ();
      return;
    }
}
/* Parse objc-class-instance-variables.

   objc-class-instance-variables:
     { objc-instance-variable-decl-list[opt] }

   objc-instance-variable-decl-list:
     objc-visibility-spec
     objc-instance-variable-decl ;
     ;
     objc-instance-variable-decl-list objc-visibility-spec
     objc-instance-variable-decl-list objc-instance-variable-decl ;
     objc-instance-variable-decl-list ;

   objc-visibility-spec:
     @private
     @protected
     @public

   objc-instance-variable-decl:
     struct-declaration
*/

static void
c_parser_objc_class_instance_variables (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE));
  c_parser_consume_token (parser);
  /* Loop until the closing brace (or EOF, to avoid spinning on a
     truncated file).  */
  while (c_parser_next_token_is_not (parser, CPP_EOF))
    {
      tree decls;
      /* Parse any stray semicolon.  */
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
		   "extra semicolon");
	  c_parser_consume_token (parser);
	  continue;
	}
      /* Stop if at the end of the instance variables.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	{
	  c_parser_consume_token (parser);
	  break;
	}
      /* Parse any objc-visibility-spec.  */
      if (c_parser_next_token_is_keyword (parser, RID_AT_PRIVATE))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PRIVATE);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PROTECTED))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PROTECTED);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PUBLIC))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PUBLIC);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PACKAGE))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PACKAGE);
	  continue;
	}
      else if (c_parser_next_token_is (parser, CPP_PRAGMA))
	{
	  c_parser_pragma (parser, pragma_external, NULL);
	  continue;
	}

      /* Parse some comma-separated declarations.  */
      decls = c_parser_struct_declaration (parser);
      if (decls == NULL)
	{
	  /* There is a syntax error.  We want to skip the offending
	     tokens up to the next ';' (included) or '}'
	     (excluded).  */

	  /* First, skip manually a ')' or ']'.  This is because they
	     reduce the nesting level, so c_parser_skip_until_found()
	     wouldn't be able to skip past them.  */
	  c_token *token = c_parser_peek_token (parser);
	  if (token->type == CPP_CLOSE_PAREN || token->type == CPP_CLOSE_SQUARE)
	    c_parser_consume_token (parser);

	  /* Then, do the standard skipping.  */
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);

	  /* We hopefully recovered.  Start normal parsing again.  */
	  parser->error = false;
	  continue;
	}
      else
	{
	  /* Comma-separated instance variables are chained together
	     in reverse order; add them one by one.  */
	  tree ivar = nreverse (decls);
	  for (; ivar; ivar = DECL_CHAIN (ivar))
	    objc_add_instance_variable (copy_node (ivar));
	}
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
    }
}
/* Parse an objc-class-declaration.

   objc-class-declaration:
     @class identifier-list ;
*/

static void
c_parser_objc_class_declaration (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_CLASS));
  c_parser_consume_token (parser);
  /* Any identifiers, including those declared as type names, are OK
     here.  */
  bool more_names = true;
  while (more_names)
    {
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  /* Bad list entry: report, resynchronize at the ';' and give
	     up on this declaration.  */
	  c_parser_error (parser, "expected identifier");
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	  parser->error = false;
	  return;
	}
      tree class_id = c_parser_peek_token (parser)->value;
      objc_declare_class (class_id);
      c_parser_consume_token (parser);
      /* A comma means another class name follows.  */
      more_names = c_parser_next_token_is (parser, CPP_COMMA);
      if (more_names)
	c_parser_consume_token (parser);
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* Parse an objc-alias-declaration.
objc-alias-declaration:
@compatibility_alias identifier identifier ;
*/
static void
c_parser_objc_alias_declaration (c_parser *parser)
{
tree id1, id2;
gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_ALIAS));
c_parser_consume_token (parser);
if (c_parser_next_token_is_not (parser, CPP_NAME))
{
c_parser_error (parser, "expected identifier");
c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
return;
}
id1 = c_parser_peek_token (parser)->value;
c_parser_consume_token (parser);
if (c_parser_next_token_is_not (parser, CPP_NAME))
{
c_parser_error (parser, "expected identifier");
c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
return;
}
id2 = c_parser_peek_token (parser)->value;
c_parser_consume_token (parser);
c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
objc_declare_alias (id1, id2);
}
/* Parse an objc-protocol-definition.

   objc-protocol-definition:
     @protocol identifier objc-protocol-refs[opt] objc-methodprotolist @end
     @protocol identifier-list ;

   "@protocol identifier ;" should be resolved as "@protocol
   identifier-list ;": objc-methodprotolist may not start with a
   semicolon in the first alternative if objc-protocol-refs are
   omitted.  */

static void
c_parser_objc_protocol_definition (c_parser *parser, tree attributes)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROTOCOL));

  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      return;
    }
  /* Two-token lookahead disambiguates the forward-declaration form
     from the full definition form (see comment above).  */
  if (c_parser_peek_2nd_token (parser)->type == CPP_COMMA
      || c_parser_peek_2nd_token (parser)->type == CPP_SEMICOLON)
    {
      /* Any identifiers, including those declared as type names, are
	 OK here.  */
      while (true)
	{
	  tree id;
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      break;
	    }
	  id = c_parser_peek_token (parser)->value;
	  objc_declare_protocol (id, attributes);
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	}
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
    }
  else
    {
      tree id = c_parser_peek_token (parser)->value;
      tree proto = NULL_TREE;
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      /* Protocol qualifiers (in, out, inout, ...) are only keywords
	 inside a protocol/interface body.  */
      parser->objc_pq_context = true;
      objc_start_protocol (id, proto, attributes);
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      parser->objc_pq_context = false;
      objc_finish_interface ();
    }
}
/* Parse an objc-method-type.

   objc-method-type:
     +
     -

   Return true if it is a class method (+) and false if it is
   an instance method (-).  The caller guarantees the next token is
   one of the two; anything else is an internal error.
*/

static inline bool
c_parser_objc_method_type (c_parser *parser)
{
  enum cpp_ttype ttype = c_parser_peek_token (parser)->type;
  if (ttype == CPP_PLUS)
    {
      c_parser_consume_token (parser);
      return true;
    }
  if (ttype == CPP_MINUS)
    {
      c_parser_consume_token (parser);
      return false;
    }
  gcc_unreachable ();
}
/* Parse an objc-method-definition.

   objc-method-definition:
     objc-method-type objc-method-decl ;[opt] compound-statement
*/

static void
c_parser_objc_method_definition (c_parser *parser)
{
  bool is_class_method = c_parser_objc_method_type (parser);
  tree decl, attributes = NULL_TREE, expr = NULL_TREE;
  /* Protocol qualifiers are keywords while the declaration is
     parsed.  */
  parser->objc_pq_context = true;
  decl = c_parser_objc_method_decl (parser, is_class_method, &attributes,
				    &expr);
  if (decl == error_mark_node)
    return;  /* Bail here. */

  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      c_parser_consume_token (parser);
      /* NOTE(review): the location reported here is the token AFTER
	 the stray semicolon (the ';' has already been consumed) --
	 confirm whether the diagnostic should point at the ';'
	 itself.  */
      pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
	       "extra semicolon in method definition specified");
    }

  if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      c_parser_error (parser, "expected %<{%>");
      return;
    }

  parser->objc_pq_context = false;
  if (objc_start_method_definition (is_class_method, decl, attributes, expr))
    {
      add_stmt (c_parser_compound_statement (parser));
      objc_finish_method_definition (current_function_decl);
    }
  else
    {
      /* This code is executed when we find a method definition
	 outside of an @implementation context (or invalid for other
	 reasons).  Parse the method (to keep going) but do not emit
	 any code.
      */
      c_parser_compound_statement (parser);
    }
}
/* Parse an objc-methodprotolist.

   objc-methodprotolist:
     empty
     objc-methodprotolist objc-methodproto
     objc-methodprotolist declaration
     objc-methodprotolist ;
     @optional
     @required

   The declaration is a data definition, which may be missing
   declaration specifiers under the same rules and diagnostics as
   other data definitions outside functions, and the stray semicolon
   is diagnosed the same way as a stray semicolon outside a
   function.  */

static void
c_parser_objc_methodprotolist (c_parser *parser)
{
  while (true)
    {
      /* The list is terminated by @end.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_SEMICOLON:
	  pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
		   "ISO C does not allow extra %<;%> outside of a function");
	  c_parser_consume_token (parser);
	  break;
	case CPP_PLUS:
	case CPP_MINUS:
	  /* + or - starts a method prototype.  */
	  c_parser_objc_methodproto (parser);
	  break;
	case CPP_PRAGMA:
	  c_parser_pragma (parser, pragma_external, NULL);
	  break;
	case CPP_EOF:
	  /* Stop at EOF so a missing @end cannot loop forever.  */
	  return;
	default:
	  if (c_parser_next_token_is_keyword (parser, RID_AT_END))
	    return;
	  else if (c_parser_next_token_is_keyword (parser, RID_AT_PROPERTY))
	    c_parser_objc_at_property_declaration (parser);
	  else if (c_parser_next_token_is_keyword (parser, RID_AT_OPTIONAL))
	    {
	      objc_set_method_opt (true);
	      c_parser_consume_token (parser);
	    }
	  else if (c_parser_next_token_is_keyword (parser, RID_AT_REQUIRED))
	    {
	      objc_set_method_opt (false);
	      c_parser_consume_token (parser);
	    }
	  else
	    /* Anything else is parsed as an ordinary declaration or
	       function definition.  */
	    c_parser_declaration_or_fndef (parser, false, false, true,
					   false, true, NULL, vNULL);
	  break;
	}
    }
}
/* Parse an objc-methodproto.

   objc-methodproto:
     objc-method-type objc-method-decl ;
*/

static void
c_parser_objc_methodproto (c_parser *parser)
{
  bool is_class_method = c_parser_objc_method_type (parser);
  tree decl, attributes = NULL_TREE;

  /* Remember protocol qualifiers in prototypes.  */
  parser->objc_pq_context = true;
  decl = c_parser_objc_method_decl (parser, is_class_method, &attributes,
				    NULL);
  /* Forget protocol qualifiers now.  */
  parser->objc_pq_context = false;

  /* Do not allow the presence of attributes to hide an erroneous
     method implementation in the interface section.  */
  if (!c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      c_parser_error (parser, "expected %<;%>");
      return;
    }

  if (decl != error_mark_node)
    objc_add_method_declaration (is_class_method, decl, attributes);

  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* If we are at a position that method attributes may be present, check that
   there are not any parsed already (a syntax error) and then collect any
   specified at the current location.  Finally, if new attributes were present,
   check that the next token is legal ( ';' for decls and '{' for defs).
   Returns true if a syntax error was detected, false otherwise.  On a
   duplicate-attribute error, previously collected attributes are
   discarded (*ATTRIBUTES is reset to NULL_TREE).  */
static bool
c_parser_objc_maybe_method_attributes (c_parser* parser, tree* attributes)
{
  bool bad = false;
  if (*attributes)
    {
      /* Attributes were already collected earlier in this
	 declaration: diagnose and drop them.  */
      c_parser_error (parser,
		      "method attributes must be specified at the end only");
      *attributes = NULL_TREE;
      bad = true;
    }
  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    *attributes = c_parser_attributes (parser);
  /* If there were no attributes here, just report any earlier error.  */
  if (*attributes == NULL_TREE || bad)
    return bad;
  /* If the attributes are followed by a ; or {, then just report any earlier
     error.  */
  if (c_parser_next_token_is (parser, CPP_SEMICOLON)
      || c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    return bad;
  /* We've got attributes, but not at the end.  */
  c_parser_error (parser,
		  "expected %<;%> or %<{%> after method attribute definition");
  return true;
}
/* Parse an objc-method-decl.
   objc-method-decl:
     ( objc-type-name ) objc-selector
     objc-selector
     ( objc-type-name ) objc-keyword-selector objc-optparmlist
     objc-keyword-selector objc-optparmlist
     attributes
   objc-keyword-selector:
     objc-keyword-decl
     objc-keyword-selector objc-keyword-decl
   objc-keyword-decl:
     objc-selector : ( objc-type-name ) identifier
     objc-selector : identifier
     : ( objc-type-name ) identifier
     : identifier
   objc-optparmlist:
     objc-optparms objc-optellipsis
   objc-optparms:
     empty
     objc-opt-parms , parameter-declaration
   objc-optellipsis:
     empty
     , ...
   Returns the method signature built by objc_build_method_signature,
   or error_mark_node on a parse or attribute-placement error.  */
static tree
c_parser_objc_method_decl (c_parser *parser, bool is_class_method,
			   tree *attributes, tree *expr)
{
  tree type = NULL_TREE;
  tree sel;
  tree parms = NULL_TREE;
  bool ellipsis = false;
  bool attr_err = false;
  *attributes = NULL_TREE;
  /* Optional parenthesized return type before the selector.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      matching_parens parens;
      parens.consume_open (parser);
      type = c_parser_objc_type_name (parser);
      parens.skip_until_found_close (parser);
    }
  sel = c_parser_objc_selector (parser);
  /* If there is no selector, or a colon follows, we have an
     objc-keyword-selector.  If there is a selector, and a colon does
     not follow, that selector ends the objc-method-decl.  */
  if (!sel || c_parser_next_token_is (parser, CPP_COLON))
    {
      tree tsel = sel;
      tree list = NULL_TREE;
      /* Parse each "selector : (type) identifier" keyword part.  */
      while (true)
	{
	  tree atype = NULL_TREE, id, keyworddecl;
	  tree param_attr = NULL_TREE;
	  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	    break;
	  /* Optional parenthesized argument type.  */
	  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
	    {
	      c_parser_consume_token (parser);
	      atype = c_parser_objc_type_name (parser);
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	    }
	  /* New ObjC allows attributes on method parameters.  */
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    param_attr = c_parser_attributes (parser);
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      return error_mark_node;
	    }
	  id = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  keyworddecl = objc_build_keyword_decl (tsel, atype, id, param_attr);
	  list = chainon (list, keyworddecl);
	  tsel = c_parser_objc_selector (parser);
	  /* Stop once there is neither another selector nor a ':'.  */
	  if (!tsel && c_parser_next_token_is_not (parser, CPP_COLON))
	    break;
	}
      attr_err |= c_parser_objc_maybe_method_attributes (parser, attributes) ;
      /* Parse the optional parameter list.  Optional Objective-C
	 method parameters follow the C syntax, and may include '...'
	 to denote a variable number of arguments.  */
      parms = make_node (TREE_LIST);
      while (c_parser_next_token_is (parser, CPP_COMMA))
	{
	  struct c_parm *parm;
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	    {
	      ellipsis = true;
	      c_parser_consume_token (parser);
	      /* Attributes may also follow the trailing '...'.  */
	      attr_err |= c_parser_objc_maybe_method_attributes
		(parser, attributes) ;
	      break;
	    }
	  parm = c_parser_parameter_declaration (parser, NULL_TREE);
	  if (parm == NULL)
	    break;
	  parms = chainon (parms,
			   build_tree_list (NULL_TREE, grokparm (parm, expr)));
	}
      sel = list;
    }
  else
    attr_err |= c_parser_objc_maybe_method_attributes (parser, attributes) ;
  if (sel == NULL)
    {
      c_parser_error (parser, "objective-c method declaration is expected");
      return error_mark_node;
    }
  /* Misplaced attributes invalidate the whole declaration.  */
  if (attr_err)
    return error_mark_node;
  return objc_build_method_signature (is_class_method, type, sel, parms, ellipsis);
}
/* Parse an objc-type-name.
   objc-type-name:
     objc-type-qualifiers[opt] type-name
     objc-type-qualifiers[opt]
   objc-type-qualifiers:
     objc-type-qualifier
     objc-type-qualifiers objc-type-qualifier
   objc-type-qualifier: one of
     in out inout bycopy byref oneway
   Returns a TREE_LIST whose purpose slot holds the collected
   qualifiers and whose value slot holds the type (NULL_TREE when no
   valid type was given, which lets error recovery fall back to the
   default 'id' type).  */
static tree
c_parser_objc_type_name (c_parser *parser)
{
  tree quals = NULL_TREE;
  tree type = NULL_TREE;

  /* Collect any leading protocol qualifiers, newest first.  */
  for (;;)
    {
      c_token *token = c_parser_peek_token (parser);
      bool is_qual = (token->type == CPP_KEYWORD
		      && (token->keyword == RID_IN
			  || token->keyword == RID_OUT
			  || token->keyword == RID_INOUT
			  || token->keyword == RID_BYCOPY
			  || token->keyword == RID_BYREF
			  || token->keyword == RID_ONEWAY));
      if (!is_qual)
	break;
      quals = chainon (build_tree_list (NULL_TREE, token->value), quals);
      c_parser_consume_token (parser);
    }

  /* An actual type name is optional.  */
  struct c_type_name *type_name = NULL;
  if (c_parser_next_tokens_start_typename (parser, cla_prefer_type))
    type_name = c_parser_type_name (parser);
  if (type_name)
    type = groktypename (type_name, NULL, NULL);

  /* If the type is unknown, an error has already been produced and
     we need to recover from it.  Use NULL_TREE for the type, as if
     no type had been specified; this selects the default type ('id')
     which is good for error recovery.  */
  if (type == error_mark_node)
    type = NULL_TREE;

  return build_tree_list (quals, type);
}
/* Parse objc-protocol-refs.
   objc-protocol-refs:
     < identifier-list >
   Returns a TREE_LIST of the protocol names seen between '<' and '>'.  */
static tree
c_parser_objc_protocol_refs (c_parser *parser)
{
  tree refs = NULL_TREE;
  gcc_assert (c_parser_next_token_is (parser, CPP_LESS));
  c_parser_consume_token (parser);
  /* Any identifiers, including those declared as type names, are OK
     here.  */
  bool more = true;
  while (more)
    {
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  break;
	}
      refs = chainon (refs,
		      build_tree_list (NULL_TREE,
				       c_parser_peek_token (parser)->value));
      c_parser_consume_token (parser);
      /* A comma means another protocol name follows.  */
      more = c_parser_next_token_is (parser, CPP_COMMA);
      if (more)
	c_parser_consume_token (parser);
    }
  c_parser_require (parser, CPP_GREATER, "expected %<>%>");
  return refs;
}
/* Parse an objc-try-catch-finally-statement.
   objc-try-catch-finally-statement:
     @try compound-statement objc-catch-list[opt]
     @try compound-statement objc-catch-list[opt] @finally compound-statement
   objc-catch-list:
     @catch ( objc-catch-parameter-declaration ) compound-statement
     objc-catch-list @catch ( objc-catch-parameter-declaration ) compound-statement
   objc-catch-parameter-declaration:
     parameter-declaration
     '...'
   where '...' is to be interpreted literally, that is, it means CPP_ELLIPSIS.
   PS: This function is identical to cp_parser_objc_try_catch_finally_statement
   for C++.  Keep them in sync.  */
static void
c_parser_objc_try_catch_finally_statement (c_parser *parser)
{
  location_t location;
  tree stmt;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_TRY));
  c_parser_consume_token (parser);
  location = c_parser_peek_token (parser)->location;
  objc_maybe_warn_exceptions (location);
  stmt = c_parser_compound_statement (parser);
  objc_begin_try_stmt (location, stmt);
  while (c_parser_next_token_is_keyword (parser, RID_AT_CATCH))
    {
      struct c_parm *parm;
      tree parameter_declaration = error_mark_node;
      bool seen_open_paren = false;
      c_parser_consume_token (parser);
      matching_parens parens;
      /* BUG FIX: SEEN_OPEN_PAREN must be set when the opening
	 parenthesis WAS present (i.e. require_open succeeded).  The
	 previous code negated the test, which made the error-recovery
	 branch below run for well-formed "@catch (...)" clauses and
	 the normal require_close path run for malformed ones.  */
      if (parens.require_open (parser))
	seen_open_paren = true;
      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	{
	  /* We have "@catch (...)" (where the '...' are literally
	     what is in the code).  Skip the '...'.
	     parameter_declaration is set to NULL_TREE, and
	     objc_being_catch_clauses() knows that that means
	     '...'.  */
	  c_parser_consume_token (parser);
	  parameter_declaration = NULL_TREE;
	}
      else
	{
	  /* We have "@catch (NSException *exception)" or something
	     like that.  Parse the parameter declaration.  */
	  parm = c_parser_parameter_declaration (parser, NULL_TREE);
	  if (parm == NULL)
	    parameter_declaration = error_mark_node;
	  else
	    parameter_declaration = grokparm (parm, NULL);
	}
      if (seen_open_paren)
	parens.require_close (parser);
      else
	{
	  /* If there was no open parenthesis, we are recovering from
	     an error, and we are trying to figure out what mistake
	     the user has made.  */
	  /* If there is an immediate closing parenthesis, the user
	     probably forgot the opening one (ie, they typed "@catch
	     NSException *e)".  Parse the closing parenthesis and keep
	     going.  */
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    c_parser_consume_token (parser);
	  /* If these is no immediate closing parenthesis, the user
	     probably doesn't know that parenthesis are required at
	     all (ie, they typed "@catch NSException *e").  So, just
	     forget about the closing parenthesis and keep going.  */
	}
      objc_begin_catch_clause (parameter_declaration);
      if (c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
	c_parser_compound_statement_nostart (parser);
      objc_finish_catch_clause ();
    }
  if (c_parser_next_token_is_keyword (parser, RID_AT_FINALLY))
    {
      c_parser_consume_token (parser);
      location = c_parser_peek_token (parser)->location;
      stmt = c_parser_compound_statement (parser);
      objc_build_finally_clause (location, stmt);
    }
  objc_finish_try_stmt ();
}
/* Parse an objc-synchronized-statement.
   objc-synchronized-statement:
     @synchronized ( expression ) compound-statement
   The guard expression is folded before being handed to
   objc_build_synchronized; on a missing '(' the guard becomes
   error_mark_node and the body is still parsed.  */
static void
c_parser_objc_synchronized_statement (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNCHRONIZED));
  c_parser_consume_token (parser);
  location_t loc = c_parser_peek_token (parser)->location;
  objc_maybe_warn_exceptions (loc);
  tree guard_expr;
  matching_parens parens;
  if (!parens.require_open (parser))
    guard_expr = error_mark_node;
  else
    {
      struct c_expr ce = c_parser_expression (parser);
      ce = convert_lvalue_to_rvalue (loc, ce, false, false);
      guard_expr = c_fully_fold (ce.value, false, NULL);
      parens.skip_until_found_close (parser);
    }
  tree body = c_parser_compound_statement (parser);
  objc_build_synchronized (loc, guard_expr, body);
}
/* Parse an objc-selector; return NULL_TREE without an error if the
   next token is not an objc-selector.
   objc-selector:
     identifier
     one of
       enum struct union if else while do for switch case default
       break continue return goto asm sizeof typeof __alignof
       unsigned long const short volatile signed restrict _Complex
       in out inout bycopy byref oneway int char float double void _Bool
       _Atomic
   ??? Why this selection of keywords but not, for example, storage
   class specifiers?  */
static tree
c_parser_objc_selector (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);
  tree value = token->value;
  /* Any plain identifier is a selector.  */
  if (token->type == CPP_NAME)
    {
      c_parser_consume_token (parser);
      return value;
    }
  /* Neither an identifier nor a keyword: not a selector; the token
     is deliberately left unconsumed.  */
  if (token->type != CPP_KEYWORD)
    return NULL_TREE;
  /* A fixed set of keywords may also be used as selector names.  */
  switch (token->keyword)
    {
    case RID_ENUM:
    case RID_STRUCT:
    case RID_UNION:
    case RID_IF:
    case RID_ELSE:
    case RID_WHILE:
    case RID_DO:
    case RID_FOR:
    case RID_SWITCH:
    case RID_CASE:
    case RID_DEFAULT:
    case RID_BREAK:
    case RID_CONTINUE:
    case RID_RETURN:
    case RID_GOTO:
    case RID_ASM:
    case RID_SIZEOF:
    case RID_TYPEOF:
    case RID_ALIGNOF:
    case RID_UNSIGNED:
    case RID_LONG:
    case RID_CONST:
    case RID_SHORT:
    case RID_VOLATILE:
    case RID_SIGNED:
    case RID_RESTRICT:
    case RID_COMPLEX:
    case RID_IN:
    case RID_OUT:
    case RID_INOUT:
    case RID_BYCOPY:
    case RID_BYREF:
    case RID_ONEWAY:
    case RID_INT:
    case RID_CHAR:
    case RID_FLOAT:
    case RID_DOUBLE:
    CASE_RID_FLOATN_NX:
    case RID_VOID:
    case RID_BOOL:
    case RID_ATOMIC:
    case RID_AUTO_TYPE:
    case RID_INT_N_0:
    case RID_INT_N_1:
    case RID_INT_N_2:
    case RID_INT_N_3:
      c_parser_consume_token (parser);
      return value;
    default:
      /* Other keywords are not selectors; leave the token alone.  */
      return NULL_TREE;
    }
}
/* Parse an objc-selector-arg.
   objc-selector-arg:
     objc-selector
     objc-keywordname-list
   objc-keywordname-list:
     objc-keywordname
     objc-keywordname-list objc-keywordname
   objc-keywordname:
     objc-selector :
     :
   Returns either the bare selector identifier or a TREE_LIST of
   keyword parts (purpose = selector piece, value = NULL_TREE).  */
static tree
c_parser_objc_selector_arg (c_parser *parser)
{
  tree keyword_list = NULL_TREE;
  tree sel = c_parser_objc_selector (parser);

  /* A plain selector with no following ':' is the whole argument.  */
  if (sel != NULL_TREE && c_parser_next_token_is_not (parser, CPP_COLON))
    return sel;

  /* Otherwise collect "selector :" / ":" keyword parts.  */
  do
    {
      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	return keyword_list;
      keyword_list = chainon (keyword_list, build_tree_list (sel, NULL_TREE));
      sel = c_parser_objc_selector (parser);
    }
  while (sel != NULL_TREE || c_parser_next_token_is (parser, CPP_COLON));

  return keyword_list;
}
/* Parse an objc-receiver.
   objc-receiver:
     expression
     class-name
     type-name
   A class or type name becomes a class reference; anything else is
   parsed as an ordinary expression and fully folded.  */
static tree
c_parser_objc_receiver (c_parser *parser)
{
  c_token *tok = c_parser_peek_token (parser);
  location_t loc = tok->location;
  /* A bare class/typedef name denotes the class object itself.  */
  if (tok->type == CPP_NAME
      && (tok->id_kind == C_ID_TYPENAME || tok->id_kind == C_ID_CLASSNAME))
    {
      tree id = tok->value;
      c_parser_consume_token (parser);
      return objc_get_class_reference (id);
    }
  /* Otherwise the receiver is a general expression.  */
  struct c_expr ce = c_parser_expression (parser);
  ce = convert_lvalue_to_rvalue (loc, ce, false, false);
  return c_fully_fold (ce.value, false, NULL);
}
/* Parse objc-message-args.
   objc-message-args:
     objc-selector
     objc-keywordarg-list
   objc-keywordarg-list:
     objc-keywordarg
     objc-keywordarg-list objc-keywordarg
   objc-keywordarg:
     objc-selector : objc-keywordexpr
     : objc-keywordexpr
   Returns the bare selector, a TREE_LIST pairing selector pieces
   with their argument expressions, or error_mark_node when a
   required ':' is missing.  */
static tree
c_parser_objc_message_args (c_parser *parser)
{
  tree args = NULL_TREE;
  tree sel = c_parser_objc_selector (parser);

  /* A selector with no following ':' is a complete, argument-free
     message.  */
  if (sel != NULL_TREE && c_parser_next_token_is_not (parser, CPP_COLON))
    return sel;

  do
    {
      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	return error_mark_node;
      tree kwexpr = c_parser_objc_keywordexpr (parser);
      args = chainon (args, build_tree_list (sel, kwexpr));
      sel = c_parser_objc_selector (parser);
    }
  while (sel != NULL_TREE || c_parser_next_token_is (parser, CPP_COLON));

  return args;
}
/* Parse an objc-keywordexpr.
   objc-keywordexpr:
     nonempty-expr-list
   A single expression is returned directly; a comma-separated list
   is wrapped in a TREE_LIST to be collapsed later.  */
static tree
c_parser_objc_keywordexpr (c_parser *parser)
{
  vec<tree, va_gc> *expr_list = c_parser_expr_list (parser, true, true,
						    NULL, NULL, NULL, NULL);
  /* One expression: unwrap it.  Several: keep them as a list.  */
  tree ret = (vec_safe_length (expr_list) == 1
	      ? (*expr_list)[0]
	      : build_tree_list_vec (expr_list));
  release_tree_vector (expr_list);
  return ret;
}
/* A check, needed in several places, that ObjC interface, implementation or
   method definitions are not prefixed by incorrect items.  Returns true
   (after diagnosing and skipping to the end of the block or statement)
   when a bad prefix was present, false when the specifiers are OK.  */
static bool
c_parser_objc_diagnose_bad_element_prefix (c_parser *parser,
					   struct c_declspecs *specs)
{
  bool bad_prefix = (!specs->declspecs_seen_p
		     || specs->non_sc_seen_p
		     || specs->typespec_kind != ctsk_none);
  if (!bad_prefix)
    return false;
  c_parser_error (parser,
		  "no type or storage class may be specified here,");
  c_parser_skip_to_end_of_block_or_statement (parser);
  return true;
}
/* Parse an Objective-C @property declaration.  The syntax is:
   objc-property-declaration:
     '@property' objc-property-attributes[opt] struct-declaration ;
   objc-property-attributes:
     '(' objc-property-attribute-list ')'
   objc-property-attribute-list:
     objc-property-attribute
     objc-property-attribute-list, objc-property-attribute
   objc-property-attribute
     'getter' = identifier
     'setter' = identifier
     'readonly'
     'readwrite'
     'assign'
     'retain'
     'copy'
     'nonatomic'
   For example:
     @property NSString *name;
     @property (readonly) id object;
     @property (retain, nonatomic, getter=getTheName) id name;
     @property int a, b, c;
   PS: This function is identical to cp_parser_objc_at_propery_declaration
   for C++.  Keep them in sync.  */
static void
c_parser_objc_at_property_declaration (c_parser *parser)
{
  /* The following variables hold the attributes of the properties as
     parsed.  They are 'false' or 'NULL_TREE' if the attribute was not
     seen.  When we see an attribute, we set them to 'true' (if they
     are boolean properties) or to the identifier (if they have an
     argument, ie, for getter and setter).  Note that here we only
     parse the list of attributes, check the syntax and accumulate the
     attributes that we find.  objc_add_property_declaration() will
     then process the information.  */
  bool property_assign = false;
  bool property_copy = false;
  tree property_getter_ident = NULL_TREE;
  bool property_nonatomic = false;
  bool property_readonly = false;
  bool property_readwrite = false;
  bool property_retain = false;
  tree property_setter_ident = NULL_TREE;
  /* 'properties' is the list of properties that we read.  Usually a
     single one, but maybe more (eg, in "@property int a, b, c;" there
     are three).  */
  tree properties;
  location_t loc;
  loc = c_parser_peek_token (parser)->location;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROPERTY));
  c_parser_consume_token (parser);  /* Eat '@property'.  */
  /* Parse the optional attribute list...  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      matching_parens parens;
      /* Eat the '('.  */
      parens.consume_open (parser);
      /* Property attribute keywords are valid now.  */
      parser->objc_property_attr_context = true;
      /* Loop over comma-separated attribute keywords until ')' or a
	 syntax error.  */
      while (true)
	{
	  bool syntax_error = false;
	  c_token *token = c_parser_peek_token (parser);
	  enum rid keyword;
	  if (token->type != CPP_KEYWORD)
	    {
	      if (token->type == CPP_CLOSE_PAREN)
		c_parser_error (parser, "expected identifier");
	      else
		{
		  c_parser_consume_token (parser);
		  c_parser_error (parser, "unknown property attribute");
		}
	      break;
	    }
	  keyword = token->keyword;
	  c_parser_consume_token (parser);
	  switch (keyword)
	    {
	    case RID_ASSIGN:    property_assign = true;    break;
	    case RID_COPY:      property_copy = true;      break;
	    case RID_NONATOMIC: property_nonatomic = true; break;
	    case RID_READONLY:  property_readonly = true;  break;
	    case RID_READWRITE: property_readwrite = true; break;
	    case RID_RETAIN:    property_retain = true;    break;
	    case RID_GETTER:
	    case RID_SETTER:
	      /* Both take the form "getter=name" / "setter=name:".  */
	      if (c_parser_next_token_is_not (parser, CPP_EQ))
		{
		  if (keyword == RID_GETTER)
		    c_parser_error (parser,
				    "missing %<=%> (after %<getter%> attribute)");
		  else
		    c_parser_error (parser,
				    "missing %<=%> (after %<setter%> attribute)");
		  syntax_error = true;
		  break;
		}
	      c_parser_consume_token (parser); /* eat the = */
	      if (c_parser_next_token_is_not (parser, CPP_NAME))
		{
		  c_parser_error (parser, "expected identifier");
		  syntax_error = true;
		  break;
		}
	      if (keyword == RID_SETTER)
		{
		  /* Each of getter/setter may be given only once.  */
		  if (property_setter_ident != NULL_TREE)
		    c_parser_error (parser, "the %<setter%> attribute may only be specified once");
		  else
		    property_setter_ident = c_parser_peek_token (parser)->value;
		  c_parser_consume_token (parser);
		  /* A setter selector takes one argument, so its name
		     must end in ':'.  */
		  if (c_parser_next_token_is_not (parser, CPP_COLON))
		    c_parser_error (parser, "setter name must terminate with %<:%>");
		  else
		    c_parser_consume_token (parser);
		}
	      else
		{
		  if (property_getter_ident != NULL_TREE)
		    c_parser_error (parser, "the %<getter%> attribute may only be specified once");
		  else
		    property_getter_ident = c_parser_peek_token (parser)->value;
		  c_parser_consume_token (parser);
		}
	      break;
	    default:
	      c_parser_error (parser, "unknown property attribute");
	      syntax_error = true;
	      break;
	    }
	  if (syntax_error)
	    break;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	}
      parser->objc_property_attr_context = false;
      parens.skip_until_found_close (parser);
    }
  /* ... and the property declaration(s).  */
  properties = c_parser_struct_declaration (parser);
  if (properties == error_mark_node)
    {
      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
      parser->error = false;
      return;
    }
  if (properties == NULL_TREE)
    c_parser_error (parser, "expected identifier");
  else
    {
      /* Comma-separated properties are chained together in
	 reverse order; add them one by one.  */
      properties = nreverse (properties);
      for (; properties; properties = TREE_CHAIN (properties))
	objc_add_property_declaration (loc, copy_node (properties),
				       property_readonly, property_readwrite,
				       property_assign, property_retain,
				       property_copy, property_nonatomic,
				       property_getter_ident, property_setter_ident);
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  parser->error = false;
}
/* Parse an Objective-C @synthesize declaration.  The syntax is:
   objc-synthesize-declaration:
     @synthesize objc-synthesize-identifier-list ;
   objc-synthesize-identifier-list:
     objc-synthesize-identifier
     objc-synthesize-identifier-list, objc-synthesize-identifier
   objc-synthesize-identifier
     identifier
     identifier = identifier
   For example:
     @synthesize MyProperty;
     @synthesize OneProperty, AnotherProperty=MyIvar, YetAnotherProperty;
   PS: This function is identical to cp_parser_objc_at_synthesize_declaration
   for C++.  Keep them in sync.
*/
static void
c_parser_objc_at_synthesize_declaration (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNTHESIZE));
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);

  tree list = NULL_TREE;
  bool more = true;
  while (more)
    {
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	  /* Once we find the semicolon, we can resume normal parsing.
	     We have to reset parser->error manually because
	     c_parser_skip_until_found() won't reset it for us if the
	     next token is precisely a semicolon.  */
	  parser->error = false;
	  return;
	}
      tree property = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);

      /* Optional "= ivar" part.  */
      tree ivar = NULL_TREE;
      if (c_parser_next_token_is (parser, CPP_EQ))
	{
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	      parser->error = false;
	      return;
	    }
	  ivar = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	}
      list = chainon (list, build_tree_list (ivar, property));

      more = c_parser_next_token_is (parser, CPP_COMMA);
      if (more)
	c_parser_consume_token (parser);
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_add_synthesize_declaration (loc, list);
}
/* Parse an Objective-C @dynamic declaration.  The syntax is:
   objc-dynamic-declaration:
     @dynamic identifier-list ;
   For example:
     @dynamic MyProperty;
     @dynamic MyProperty, AnotherProperty;
   PS: This function is identical to cp_parser_objc_at_dynamic_declaration
   for C++.  Keep them in sync.
*/
static void
c_parser_objc_at_dynamic_declaration (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_DYNAMIC));
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);

  tree list = NULL_TREE;
  bool more = true;
  while (more)
    {
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	  /* Reset parser->error ourselves in case the next token was
	     exactly the semicolon.  */
	  parser->error = false;
	  return;
	}
      tree property = c_parser_peek_token (parser)->value;
      list = chainon (list, build_tree_list (NULL_TREE, property));
      c_parser_consume_token (parser);

      more = c_parser_next_token_is (parser, CPP_COMMA);
      if (more)
	c_parser_consume_token (parser);
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_add_dynamic_declaration (loc, list);
}
/* Parse a pragma GCC ivdep: consume the pragma token and everything
   up to the end of the pragma line.  Always returns true ("ivdep was
   seen"); the caller stores the result as the loop's ivdep flag.  */
static bool
c_parse_pragma_ivdep (c_parser *parser)
{
  c_parser_consume_pragma (parser);
  c_parser_skip_to_pragma_eol (parser);
  return true;
}
/* Parse a pragma GCC unroll.  Returns the requested unroll factor:
   0 when the operand is invalid (an error is emitted), otherwise the
   operand's value, with 0 mapped to 1.  */
static unsigned short
c_parser_pragma_unroll (c_parser *parser)
{
  c_parser_consume_pragma (parser);
  location_t location = c_parser_peek_token (parser)->location;
  tree expr = c_parser_expr_no_commas (parser, NULL).value;
  mark_exp_read (expr);
  expr = c_fully_fold (expr, false, NULL);

  /* The operand must be a non-negative integral constant below
     USHRT_MAX.  NB: the (lunroll = ...) assignment inside the
     condition only runs once the first two checks have passed.  */
  unsigned short unroll = 0;
  HOST_WIDE_INT lunroll = 0;
  if (!INTEGRAL_TYPE_P (TREE_TYPE (expr))
      || TREE_CODE (expr) != INTEGER_CST
      || (lunroll = tree_to_shwi (expr)) < 0
      || lunroll >= USHRT_MAX)
    error_at (location, "%<#pragma GCC unroll%> requires an"
	      " assignment-expression that evaluates to a non-negative"
	      " integral constant less than %u", USHRT_MAX);
  else
    /* "unroll 0" is treated the same as "unroll 1".  */
    unroll = lunroll ? (unsigned short) lunroll : 1;

  c_parser_skip_to_pragma_eol (parser);
  return unroll;
}
/* Handle pragmas.  Some OpenMP pragmas are associated with, and therefore
   should be considered, statements.  ALLOW_STMT is true if we're within
   the context of a function and such pragmas are to be allowed.  Returns
   true if we actually parsed such a pragma.  CONTEXT says where in the
   source the pragma appeared (file scope, compound statement, ...);
   IF_P is forwarded to the statement parsers that need to know whether
   the construct's body is an unbraced 'if'.  */
static bool
c_parser_pragma (c_parser *parser, enum pragma_context context, bool *if_p)
{
  unsigned int id;
  const char *construct = NULL;
  id = c_parser_peek_token (parser)->pragma_kind;
  gcc_assert (id != PRAGMA_NONE);
  switch (id)
    {
    case PRAGMA_OACC_DECLARE:
      c_parser_oacc_declare (parser);
      return false;
    case PRAGMA_OACC_ENTER_DATA:
      if (context != pragma_compound)
	{
	  construct = "acc enter data";
	  /* Shared diagnostic tail for pragmas that are only valid
	     inside a compound statement; the other cases below jump
	     here with CONSTRUCT set to their pragma's name.  */
	in_compound:
	  if (context == pragma_stmt)
	    {
	      error_at (c_parser_peek_token (parser)->location,
			"%<#pragma %s%> may only be used in compound "
			"statements", construct);
	      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
	      return false;
	    }
	  goto bad_stmt;
	}
      c_parser_oacc_enter_exit_data (parser, true);
      return false;
    case PRAGMA_OACC_EXIT_DATA:
      if (context != pragma_compound)
	{
	  construct = "acc exit data";
	  goto in_compound;
	}
      c_parser_oacc_enter_exit_data (parser, false);
      return false;
    case PRAGMA_OACC_ROUTINE:
      if (context != pragma_external)
	{
	  error_at (c_parser_peek_token (parser)->location,
		    "%<#pragma acc routine%> must be at file scope");
	  c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
	  return false;
	}
      c_parser_oacc_routine (parser, context);
      return false;
    case PRAGMA_OACC_UPDATE:
      if (context != pragma_compound)
	{
	  construct = "acc update";
	  goto in_compound;
	}
      c_parser_oacc_update (parser);
      return false;
    case PRAGMA_OMP_BARRIER:
      if (context != pragma_compound)
	{
	  construct = "omp barrier";
	  goto in_compound;
	}
      c_parser_omp_barrier (parser);
      return false;
    case PRAGMA_OMP_FLUSH:
      if (context != pragma_compound)
	{
	  construct = "omp flush";
	  goto in_compound;
	}
      c_parser_omp_flush (parser);
      return false;
    case PRAGMA_OMP_TASKWAIT:
      if (context != pragma_compound)
	{
	  construct = "omp taskwait";
	  goto in_compound;
	}
      c_parser_omp_taskwait (parser);
      return false;
    case PRAGMA_OMP_TASKYIELD:
      if (context != pragma_compound)
	{
	  construct = "omp taskyield";
	  goto in_compound;
	}
      c_parser_omp_taskyield (parser);
      return false;
    case PRAGMA_OMP_CANCEL:
      if (context != pragma_compound)
	{
	  construct = "omp cancel";
	  goto in_compound;
	}
      c_parser_omp_cancel (parser);
      return false;
    case PRAGMA_OMP_CANCELLATION_POINT:
      c_parser_omp_cancellation_point (parser, context);
      return false;
    case PRAGMA_OMP_THREADPRIVATE:
      c_parser_omp_threadprivate (parser);
      return false;
    case PRAGMA_OMP_TARGET:
      return c_parser_omp_target (parser, context, if_p);
    case PRAGMA_OMP_END_DECLARE_TARGET:
      c_parser_omp_end_declare_target (parser);
      return false;
    case PRAGMA_OMP_SECTION:
      error_at (c_parser_peek_token (parser)->location,
		"%<#pragma omp section%> may only be used in "
		"%<#pragma omp sections%> construct");
      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
      return false;
    case PRAGMA_OMP_DECLARE:
      c_parser_omp_declare (parser, context);
      return false;
    case PRAGMA_OMP_ORDERED:
      return c_parser_omp_ordered (parser, context, if_p);
    case PRAGMA_IVDEP:
      {
	/* "GCC ivdep" may be combined with "GCC unroll" (in either
	   order) and must be followed by a for/while/do loop.  */
	const bool ivdep = c_parse_pragma_ivdep (parser);
	unsigned short unroll;
	if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_UNROLL)
	  unroll = c_parser_pragma_unroll (parser);
	else
	  unroll = 0;
	if (!c_parser_next_token_is_keyword (parser, RID_FOR)
	    && !c_parser_next_token_is_keyword (parser, RID_WHILE)
	    && !c_parser_next_token_is_keyword (parser, RID_DO))
	  {
	    c_parser_error (parser, "for, while or do statement expected");
	    return false;
	  }
	if (c_parser_next_token_is_keyword (parser, RID_FOR))
	  c_parser_for_statement (parser, ivdep, unroll, if_p);
	else if (c_parser_next_token_is_keyword (parser, RID_WHILE))
	  c_parser_while_statement (parser, ivdep, unroll, if_p);
	else
	  c_parser_do_statement (parser, ivdep, unroll);
      }
      return false;
    case PRAGMA_UNROLL:
      {
	/* Mirror image of the PRAGMA_IVDEP case above.  */
	unsigned short unroll = c_parser_pragma_unroll (parser);
	bool ivdep;
	if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_IVDEP)
	  ivdep = c_parse_pragma_ivdep (parser);
	else
	  ivdep = false;
	if (!c_parser_next_token_is_keyword (parser, RID_FOR)
	    && !c_parser_next_token_is_keyword (parser, RID_WHILE)
	    && !c_parser_next_token_is_keyword (parser, RID_DO))
	  {
	    c_parser_error (parser, "for, while or do statement expected");
	    return false;
	  }
	if (c_parser_next_token_is_keyword (parser, RID_FOR))
	  c_parser_for_statement (parser, ivdep, unroll, if_p);
	else if (c_parser_next_token_is_keyword (parser, RID_WHILE))
	  c_parser_while_statement (parser, ivdep, unroll, if_p);
	else
	  c_parser_do_statement (parser, ivdep, unroll);
      }
      return false;
    case PRAGMA_GCC_PCH_PREPROCESS:
      c_parser_error (parser, "%<#pragma GCC pch_preprocess%> must be first");
      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
      return false;
    case PRAGMA_OACC_WAIT:
      if (context != pragma_compound)
	{
	  construct = "acc wait";
	  goto in_compound;
	}
	/* FALL THROUGH.  */
    default:
      /* Remaining built-in pragmas are OpenMP/OpenACC constructs;
	 anything at or above PRAGMA_FIRST_EXTERNAL is handled by a
	 registered external handler below.  */
      if (id < PRAGMA_FIRST_EXTERNAL)
	{
	  if (context != pragma_stmt && context != pragma_compound)
	    {
	    bad_stmt:
	      c_parser_error (parser, "expected declaration specifiers");
	      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
	      return false;
	    }
	  c_parser_omp_construct (parser, if_p);
	  return true;
	}
      break;
    }
  c_parser_consume_pragma (parser);
  c_invoke_pragma_handler (id);
  /* Skip to EOL, but suppress any error message.  Those will have been
     generated by the handler routine through calling error, as opposed
     to calling c_parser_error.  */
  parser->error = true;
  c_parser_skip_to_pragma_eol (parser);
  return false;
}
/* The interface the pragma parsers have to the lexer.  Returns the
   kind of the next token (keywords are reported as CPP_NAME, and both
   pragma-EOL and EOF as CPP_EOF), stores its value in *VALUE and, if
   LOC is non-null, its location in *LOC.  The token is consumed
   unless it ends the pragma line.  */
enum cpp_ttype
pragma_lex (tree *value, location_t *loc)
{
  c_token *tok = c_parser_peek_token (the_parser);
  enum cpp_ttype kind = tok->type;

  *value = tok->value;
  if (loc != NULL)
    *loc = tok->location;

  /* Leave the terminating token in place for the caller's loop.  */
  if (kind == CPP_PRAGMA_EOL || kind == CPP_EOF)
    return CPP_EOF;

  if (kind == CPP_KEYWORD)
    kind = CPP_NAME;
  c_parser_consume_token (the_parser);
  return kind;
}
/* Handle #pragma GCC pch_preprocess "FILENAME": read the string
   operand and hand it to c_common_pch_pragma.  Nothing is done when
   the operand is missing (an error is emitted instead).  */
static void
c_parser_pragma_pch_preprocess (c_parser *parser)
{
  tree name = NULL;
  c_parser_consume_pragma (parser);
  if (c_parser_next_token_is_not (parser, CPP_STRING))
    c_parser_error (parser, "expected string literal");
  else
    {
      name = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  c_parser_skip_to_pragma_eol (parser);
  if (name != NULL)
    c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name));
}
/* OpenACC and OpenMP parsing routines. */
/* Returns name of the next clause.
If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and
the token is not consumed. Otherwise appropriate pragma_omp_clause is
returned and the token is consumed. */
static pragma_omp_clause
c_parser_omp_clause_name (c_parser *parser)
{
  pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE;

  /* A few clause names are also C keywords, so they arrive as keyword
     tokens rather than CPP_NAME and must be tested separately.  */
  if (c_parser_next_token_is_keyword (parser, RID_AUTO))
    result = PRAGMA_OACC_CLAUSE_AUTO;
  else if (c_parser_next_token_is_keyword (parser, RID_IF))
    result = PRAGMA_OMP_CLAUSE_IF;
  else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
    result = PRAGMA_OMP_CLAUSE_DEFAULT;
  else if (c_parser_next_token_is_keyword (parser, RID_FOR))
    result = PRAGMA_OMP_CLAUSE_FOR;
  else if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      /* Dispatch on the first character to keep the strcmp chains
	 short.  Both OpenMP and OpenACC clause names are recognized
	 here; the caller rejects the ones invalid for its directive.  */
      switch (p[0])
	{
	case 'a':
	  if (!strcmp ("aligned", p))
	    result = PRAGMA_OMP_CLAUSE_ALIGNED;
	  else if (!strcmp ("async", p))
	    result = PRAGMA_OACC_CLAUSE_ASYNC;
	  break;
	case 'c':
	  if (!strcmp ("collapse", p))
	    result = PRAGMA_OMP_CLAUSE_COLLAPSE;
	  else if (!strcmp ("copy", p))
	    result = PRAGMA_OACC_CLAUSE_COPY;
	  else if (!strcmp ("copyin", p))
	    result = PRAGMA_OMP_CLAUSE_COPYIN;
	  else if (!strcmp ("copyout", p))
	    result = PRAGMA_OACC_CLAUSE_COPYOUT;
	  else if (!strcmp ("copyprivate", p))
	    result = PRAGMA_OMP_CLAUSE_COPYPRIVATE;
	  else if (!strcmp ("create", p))
	    result = PRAGMA_OACC_CLAUSE_CREATE;
	  break;
	case 'd':
	  if (!strcmp ("defaultmap", p))
	    result = PRAGMA_OMP_CLAUSE_DEFAULTMAP;
	  else if (!strcmp ("delete", p))
	    result = PRAGMA_OACC_CLAUSE_DELETE;
	  else if (!strcmp ("depend", p))
	    result = PRAGMA_OMP_CLAUSE_DEPEND;
	  else if (!strcmp ("device", p))
	    result = PRAGMA_OMP_CLAUSE_DEVICE;
	  else if (!strcmp ("deviceptr", p))
	    result = PRAGMA_OACC_CLAUSE_DEVICEPTR;
	  else if (!strcmp ("device_resident", p))
	    result = PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT;
	  else if (!strcmp ("dist_schedule", p))
	    result = PRAGMA_OMP_CLAUSE_DIST_SCHEDULE;
	  break;
	case 'f':
	  if (!strcmp ("final", p))
	    result = PRAGMA_OMP_CLAUSE_FINAL;
	  else if (!strcmp ("firstprivate", p))
	    result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE;
	  else if (!strcmp ("from", p))
	    result = PRAGMA_OMP_CLAUSE_FROM;
	  break;
	case 'g':
	  if (!strcmp ("gang", p))
	    result = PRAGMA_OACC_CLAUSE_GANG;
	  else if (!strcmp ("grainsize", p))
	    result = PRAGMA_OMP_CLAUSE_GRAINSIZE;
	  break;
	case 'h':
	  if (!strcmp ("hint", p))
	    result = PRAGMA_OMP_CLAUSE_HINT;
	  else if (!strcmp ("host", p))
	    result = PRAGMA_OACC_CLAUSE_HOST;
	  break;
	case 'i':
	  if (!strcmp ("inbranch", p))
	    result = PRAGMA_OMP_CLAUSE_INBRANCH;
	  else if (!strcmp ("independent", p))
	    result = PRAGMA_OACC_CLAUSE_INDEPENDENT;
	  else if (!strcmp ("is_device_ptr", p))
	    result = PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR;
	  break;
	case 'l':
	  if (!strcmp ("lastprivate", p))
	    result = PRAGMA_OMP_CLAUSE_LASTPRIVATE;
	  else if (!strcmp ("linear", p))
	    result = PRAGMA_OMP_CLAUSE_LINEAR;
	  else if (!strcmp ("link", p))
	    result = PRAGMA_OMP_CLAUSE_LINK;
	  break;
	case 'm':
	  if (!strcmp ("map", p))
	    result = PRAGMA_OMP_CLAUSE_MAP;
	  else if (!strcmp ("mergeable", p))
	    result = PRAGMA_OMP_CLAUSE_MERGEABLE;
	  break;
	case 'n':
	  if (!strcmp ("nogroup", p))
	    result = PRAGMA_OMP_CLAUSE_NOGROUP;
	  else if (!strcmp ("notinbranch", p))
	    result = PRAGMA_OMP_CLAUSE_NOTINBRANCH;
	  else if (!strcmp ("nowait", p))
	    result = PRAGMA_OMP_CLAUSE_NOWAIT;
	  else if (!strcmp ("num_gangs", p))
	    result = PRAGMA_OACC_CLAUSE_NUM_GANGS;
	  else if (!strcmp ("num_tasks", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_TASKS;
	  else if (!strcmp ("num_teams", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_TEAMS;
	  else if (!strcmp ("num_threads", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_THREADS;
	  else if (!strcmp ("num_workers", p))
	    result = PRAGMA_OACC_CLAUSE_NUM_WORKERS;
	  break;
	case 'o':
	  if (!strcmp ("ordered", p))
	    result = PRAGMA_OMP_CLAUSE_ORDERED;
	  break;
	case 'p':
	  if (!strcmp ("parallel", p))
	    result = PRAGMA_OMP_CLAUSE_PARALLEL;
	  else if (!strcmp ("present", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT;
	  /* OpenACC spells each present_or_* clause with a short "p"
	     alias; both map to the same pragma clause.  */
	  else if (!strcmp ("present_or_copy", p)
		   || !strcmp ("pcopy", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY;
	  else if (!strcmp ("present_or_copyin", p)
		   || !strcmp ("pcopyin", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN;
	  else if (!strcmp ("present_or_copyout", p)
		   || !strcmp ("pcopyout", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT;
	  else if (!strcmp ("present_or_create", p)
		   || !strcmp ("pcreate", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE;
	  else if (!strcmp ("priority", p))
	    result = PRAGMA_OMP_CLAUSE_PRIORITY;
	  else if (!strcmp ("private", p))
	    result = PRAGMA_OMP_CLAUSE_PRIVATE;
	  else if (!strcmp ("proc_bind", p))
	    result = PRAGMA_OMP_CLAUSE_PROC_BIND;
	  break;
	case 'r':
	  if (!strcmp ("reduction", p))
	    result = PRAGMA_OMP_CLAUSE_REDUCTION;
	  break;
	case 's':
	  if (!strcmp ("safelen", p))
	    result = PRAGMA_OMP_CLAUSE_SAFELEN;
	  else if (!strcmp ("schedule", p))
	    result = PRAGMA_OMP_CLAUSE_SCHEDULE;
	  else if (!strcmp ("sections", p))
	    result = PRAGMA_OMP_CLAUSE_SECTIONS;
	  else if (!strcmp ("seq", p))
	    result = PRAGMA_OACC_CLAUSE_SEQ;
	  else if (!strcmp ("shared", p))
	    result = PRAGMA_OMP_CLAUSE_SHARED;
	  else if (!strcmp ("simd", p))
	    result = PRAGMA_OMP_CLAUSE_SIMD;
	  else if (!strcmp ("simdlen", p))
	    result = PRAGMA_OMP_CLAUSE_SIMDLEN;
	  else if (!strcmp ("self", p))
	    result = PRAGMA_OACC_CLAUSE_SELF;
	  break;
	case 't':
	  if (!strcmp ("taskgroup", p))
	    result = PRAGMA_OMP_CLAUSE_TASKGROUP;
	  else if (!strcmp ("thread_limit", p))
	    result = PRAGMA_OMP_CLAUSE_THREAD_LIMIT;
	  else if (!strcmp ("threads", p))
	    result = PRAGMA_OMP_CLAUSE_THREADS;
	  else if (!strcmp ("tile", p))
	    result = PRAGMA_OACC_CLAUSE_TILE;
	  else if (!strcmp ("to", p))
	    result = PRAGMA_OMP_CLAUSE_TO;
	  break;
	case 'u':
	  if (!strcmp ("uniform", p))
	    result = PRAGMA_OMP_CLAUSE_UNIFORM;
	  else if (!strcmp ("untied", p))
	    result = PRAGMA_OMP_CLAUSE_UNTIED;
	  else if (!strcmp ("use_device", p))
	    result = PRAGMA_OACC_CLAUSE_USE_DEVICE;
	  else if (!strcmp ("use_device_ptr", p))
	    result = PRAGMA_OMP_CLAUSE_USE_DEVICE_PTR;
	  break;
	case 'v':
	  if (!strcmp ("vector", p))
	    result = PRAGMA_OACC_CLAUSE_VECTOR;
	  else if (!strcmp ("vector_length", p))
	    result = PRAGMA_OACC_CLAUSE_VECTOR_LENGTH;
	  break;
	case 'w':
	  if (!strcmp ("wait", p))
	    result = PRAGMA_OACC_CLAUSE_WAIT;
	  else if (!strcmp ("worker", p))
	    result = PRAGMA_OACC_CLAUSE_WORKER;
	  break;
	}
    }

  /* Only consume the token when it was recognized as a clause name;
     otherwise leave it for the caller, as documented above.  */
  if (result != PRAGMA_OMP_CLAUSE_NONE)
    c_parser_consume_token (parser);

  return result;
}
/* Diagnose a duplicate clause: if LIST (aka CLAUSES) already contains
   a clause of kind CODE, emit "too many NAME clauses" at the location
   of the first such clause found.  At most one error is reported.  */

static void
check_no_duplicate_clause (tree clauses, enum omp_clause_code code,
			   const char *name)
{
  for (tree c = clauses; c != NULL_TREE; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != code)
	continue;
      error_at (OMP_CLAUSE_LOCATION (c), "too many %qs clauses", name);
      break;
    }
}
/* OpenACC 2.0
   Parse wait clause or wait directive parameters: a parenthesized,
   comma-separated list of integral expressions.  Each valid expression
   is wrapped in an OMP_CLAUSE_WAIT node prepended to LIST; the
   (possibly extended) list is returned.  */

static tree
c_parser_oacc_wait_list (c_parser *parser, location_t clause_loc, tree list)
{
  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  vec<tree, va_gc> *args
    = c_parser_expr_list (parser, false, true, NULL, NULL, NULL, NULL);
  if (args->length () == 0)
    {
      c_parser_error (parser, "expected integer expression before ')'");
      release_tree_vector (args);
      return list;
    }

  tree args_tree = build_tree_list_vec (args);
  for (tree t = args_tree; t; t = TREE_CHAIN (t))
    {
      tree targ = TREE_VALUE (t);
      if (targ == error_mark_node)
	continue;
      if (!INTEGRAL_TYPE_P (TREE_TYPE (targ)))
	/* Diagnose, but keep scanning the remaining arguments.  */
	c_parser_error (parser, "expression must be integral");
      else
	{
	  tree c = build_omp_clause (clause_loc, OMP_CLAUSE_WAIT);
	  OMP_CLAUSE_DECL (c) = targ;
	  OMP_CLAUSE_CHAIN (c) = list;
	  list = c;
	}
    }

  release_tree_vector (args);
  parens.require_close (parser);
  return list;
}
/* OpenACC 2.0, OpenMP 2.5:
variable-list:
identifier
variable-list , identifier
If KIND is nonzero, create the appropriate node and install the
decl in OMP_CLAUSE_DECL and add the node to the head of the list.
If KIND is nonzero, CLAUSE_LOC is the location of the clause.
If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE;
return the list created. */
static tree
c_parser_omp_variable_list (c_parser *parser,
			    location_t clause_loc,
			    enum omp_clause_code kind, tree list)
{
  /* Require at least one identifier; the loop below then consumes a
     comma-separated sequence of them.  */
  if (c_parser_next_token_is_not (parser, CPP_NAME)
      || c_parser_peek_token (parser)->id_kind != C_ID_ID)
    c_parser_error (parser, "expected identifier");

  while (c_parser_next_token_is (parser, CPP_NAME)
	 && c_parser_peek_token (parser)->id_kind == C_ID_ID)
    {
      tree t = lookup_name (c_parser_peek_token (parser)->value);

      if (t == NULL_TREE)
	{
	  undeclared_variable (c_parser_peek_token (parser)->location,
			       c_parser_peek_token (parser)->value);
	  t = error_mark_node;
	}

      c_parser_consume_token (parser);

      if (t == error_mark_node)
	;  /* Already diagnosed; still look for a following comma.  */
      else if (kind != 0)
	{
	  /* For clause kinds that permit them, parse trailing member
	     accesses (.field) and/or array sections ([lb : len]).  */
	  switch (kind)
	    {
	    case OMP_CLAUSE__CACHE_:
	      /* The OpenACC cache directive explicitly only allows "array
		 elements or subarrays".  */
	      if (c_parser_peek_token (parser)->type != CPP_OPEN_SQUARE)
		{
		  c_parser_error (parser, "expected %<[%>");
		  t = error_mark_node;
		  break;
		}
	      /* FALLTHROUGH */
	    case OMP_CLAUSE_MAP:
	    case OMP_CLAUSE_FROM:
	    case OMP_CLAUSE_TO:
	      /* Member accesses: build a COMPONENT_REF chain on T.  */
	      while (c_parser_next_token_is (parser, CPP_DOT))
		{
		  location_t op_loc = c_parser_peek_token (parser)->location;
		  c_parser_consume_token (parser);
		  if (!c_parser_next_token_is (parser, CPP_NAME))
		    {
		      c_parser_error (parser, "expected identifier");
		      t = error_mark_node;
		      break;
		    }

		  c_token *comp_tok = c_parser_peek_token (parser);
		  tree ident = comp_tok->value;
		  location_t comp_loc = comp_tok->location;
		  c_parser_consume_token (parser);
		  t = build_component_ref (op_loc, t, ident, comp_loc);
		}
	      /* FALLTHROUGH */
	    case OMP_CLAUSE_DEPEND:
	    case OMP_CLAUSE_REDUCTION:
	      /* Array sections: [lb : len], [lb] (length 1), [: len],
		 [lb :], or [:].  Each section is recorded by wrapping T
		 in a TREE_LIST with purpose = low bound, value = length.  */
	      while (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
		{
		  tree low_bound = NULL_TREE, length = NULL_TREE;

		  c_parser_consume_token (parser);
		  if (!c_parser_next_token_is (parser, CPP_COLON))
		    {
		      location_t expr_loc
			= c_parser_peek_token (parser)->location;
		      c_expr expr = c_parser_expression (parser);
		      expr = convert_lvalue_to_rvalue (expr_loc, expr,
						       false, true);
		      low_bound = expr.value;
		    }
		  if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
		    /* [lb] is shorthand for a single element.  */
		    length = integer_one_node;
		  else
		    {
		      /* Look for `:'.  */
		      if (!c_parser_require (parser, CPP_COLON,
					     "expected %<:%>"))
			{
			  t = error_mark_node;
			  break;
			}
		      if (!c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
			{
			  location_t expr_loc
			    = c_parser_peek_token (parser)->location;
			  c_expr expr = c_parser_expression (parser);
			  expr = convert_lvalue_to_rvalue (expr_loc, expr,
							   false, true);
			  length = expr.value;
			}
		    }
		  /* Look for the closing `]'.  */
		  if (!c_parser_require (parser, CPP_CLOSE_SQUARE,
					 "expected %<]%>"))
		    {
		      t = error_mark_node;
		      break;
		    }

		  t = tree_cons (low_bound, length, t);
		}
	      break;
	    default:
	      break;
	    }

	  /* Wrap the (possibly decorated) decl in a clause node of
	     KIND and prepend it to LIST.  */
	  if (t != error_mark_node)
	    {
	      tree u = build_omp_clause (clause_loc, kind);
	      OMP_CLAUSE_DECL (u) = t;
	      OMP_CLAUSE_CHAIN (u) = list;
	      list = u;
	    }
	}
      else
	/* KIND == 0: collect a plain TREE_LIST, decl in TREE_PURPOSE.  */
	list = tree_cons (t, NULL_TREE, list);

      if (c_parser_next_token_is_not (parser, CPP_COMMA))
	break;

      c_parser_consume_token (parser);
    }

  return list;
}
/* As c_parser_omp_variable_list, but with the list surrounded by
   parentheses, which is the common form for OpenACC and OpenMP
   clauses.  The clause location is taken from the token preceding
   the open paren.  */

static tree
c_parser_omp_var_list_parens (c_parser *parser, enum omp_clause_code kind,
			      tree list)
{
  /* The clauses location.  */
  location_t loc = c_parser_peek_token (parser)->location;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  list = c_parser_omp_variable_list (parser, loc, kind, list);
  parens.skip_until_found_close (parser);
  return list;
}
/* OpenACC 2.0:
   copy ( variable-list )
   copyin ( variable-list )
   copyout ( variable-list )
   create ( variable-list )
   delete ( variable-list )
   present ( variable-list )
   present_or_copy ( variable-list )
   pcopy ( variable-list )
   present_or_copyin ( variable-list )
   pcopyin ( variable-list )
   present_or_copyout ( variable-list )
   pcopyout ( variable-list )
   present_or_create ( variable-list )
   pcreate ( variable-list )

   Parse one of the OpenACC data clauses above: each is an
   OMP_CLAUSE_MAP whose map kind encodes the data motion.  */

static tree
c_parser_oacc_data_clause (c_parser *parser, pragma_omp_clause c_kind,
			   tree list)
{
  /* Translate the pragma clause into the map kind to record on the
     OMP_CLAUSE_MAP nodes built below.  */
  enum gomp_map_kind map_kind;
  switch (c_kind)
    {
    case PRAGMA_OACC_CLAUSE_COPY:
      map_kind = GOMP_MAP_FORCE_TOFROM;
      break;
    case PRAGMA_OACC_CLAUSE_COPYIN:
      map_kind = GOMP_MAP_FORCE_TO;
      break;
    case PRAGMA_OACC_CLAUSE_COPYOUT:
      map_kind = GOMP_MAP_FORCE_FROM;
      break;
    case PRAGMA_OACC_CLAUSE_CREATE:
      map_kind = GOMP_MAP_FORCE_ALLOC;
      break;
    case PRAGMA_OACC_CLAUSE_DELETE:
      map_kind = GOMP_MAP_DELETE;
      break;
    case PRAGMA_OACC_CLAUSE_DEVICE:
      map_kind = GOMP_MAP_FORCE_TO;
      break;
    case PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT:
      map_kind = GOMP_MAP_DEVICE_RESIDENT;
      break;
    case PRAGMA_OACC_CLAUSE_HOST:
    case PRAGMA_OACC_CLAUSE_SELF:
      map_kind = GOMP_MAP_FORCE_FROM;
      break;
    case PRAGMA_OACC_CLAUSE_LINK:
      map_kind = GOMP_MAP_LINK;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT:
      map_kind = GOMP_MAP_FORCE_PRESENT;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY:
      map_kind = GOMP_MAP_TOFROM;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN:
      map_kind = GOMP_MAP_TO;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT:
      map_kind = GOMP_MAP_FROM;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE:
      map_kind = GOMP_MAP_ALLOC;
      break;
    default:
      gcc_unreachable ();
    }

  /* The newly-parsed clauses are exactly those prepended ahead of the
     old LIST head, so stamp the map kind on that prefix only.  */
  tree clauses = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_MAP, list);
  for (tree c = clauses; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_SET_MAP_KIND (c, map_kind);
  return clauses;
}
/* OpenACC 2.0:
   deviceptr ( variable-list )

   Parse a deviceptr clause: each listed variable must be a pointer
   variable (or parameter); each becomes an OMP_CLAUSE_MAP with map
   kind GOMP_MAP_FORCE_DEVICEPTR, prepended to LIST.  Returns the
   extended list.  */

static tree
c_parser_oacc_data_clause_deviceptr (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree vars, t;

  /* Can't use OMP_CLAUSE_MAP here (that is, can't use the generic
     c_parser_oacc_data_clause), as for PRAGMA_OACC_CLAUSE_DEVICEPTR,
     variable-list must only allow for pointer variables.  */
  vars = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL);
  /* The loop condition was the redundant "t && t"; "t" is equivalent.  */
  for (t = vars; t; t = TREE_CHAIN (t))
    {
      tree v = TREE_PURPOSE (t);

      /* FIXME diagnostics: Ideally we should keep individual
	 locations for all the variables in the var list to make the
	 following errors more precise.  Perhaps
	 c_parser_omp_var_list_parens() should construct a list of
	 locations to go along with the var list.  */
      if (!VAR_P (v) && TREE_CODE (v) != PARM_DECL)
	error_at (loc, "%qD is not a variable", v);
      else if (TREE_TYPE (v) == error_mark_node)
	;
      else if (!POINTER_TYPE_P (TREE_TYPE (v)))
	error_at (loc, "%qD is not a pointer variable", v);

      /* A clause is built even for diagnosed variables, matching the
	 historical behavior of this function.  */
      tree u = build_omp_clause (loc, OMP_CLAUSE_MAP);
      OMP_CLAUSE_SET_MAP_KIND (u, GOMP_MAP_FORCE_DEVICEPTR);
      OMP_CLAUSE_DECL (u) = v;
      OMP_CLAUSE_CHAIN (u) = list;
      list = u;
    }

  return list;
}
/* OpenACC 2.0, OpenMP 3.0:
   collapse ( constant-expression )

   The argument must be a positive constant integer that fits in an
   int; on success an OMP_CLAUSE_COLLAPSE node is prepended to LIST.  */

static tree
c_parser_omp_clause_collapse (c_parser *parser, tree list)
{
  check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse");
  check_no_duplicate_clause (list, OMP_CLAUSE_TILE, "tile");

  location_t loc = c_parser_peek_token (parser)->location;
  tree num = error_mark_node;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      num = c_parser_expr_no_commas (parser, NULL).value;
      parens.skip_until_found_close (parser);
    }
  if (num == error_mark_node)
    return list;

  mark_exp_read (num);
  num = c_fully_fold (num, false, NULL);

  /* Accept only positive constants representable as int.  */
  HOST_WIDE_INT n;
  bool valid = (INTEGRAL_TYPE_P (TREE_TYPE (num))
		&& tree_fits_shwi_p (num)
		&& (n = tree_to_shwi (num)) > 0
		&& (int) n == n);
  if (!valid)
    {
      error_at (loc,
		"collapse argument needs positive constant integer expression");
      return list;
    }

  tree c = build_omp_clause (loc, OMP_CLAUSE_COLLAPSE);
  OMP_CLAUSE_COLLAPSE_EXPR (c) = num;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   copyin ( variable-list )

   Thin wrapper: collect the parenthesized variable list into
   OMP_CLAUSE_COPYIN nodes prepended to LIST.  */

static tree
c_parser_omp_clause_copyin (c_parser *parser, tree list)
{
  tree clauses = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYIN,
					       list);
  return clauses;
}
/* OpenMP 2.5:
   copyprivate ( variable-list )

   Thin wrapper: collect the parenthesized variable list into
   OMP_CLAUSE_COPYPRIVATE nodes prepended to LIST.  */

static tree
c_parser_omp_clause_copyprivate (c_parser *parser, tree list)
{
  tree clauses = c_parser_omp_var_list_parens (parser,
					       OMP_CLAUSE_COPYPRIVATE, list);
  return clauses;
}
/* OpenMP 2.5:
default ( none | shared )
OpenACC:
default ( none | present ) */
static tree
c_parser_omp_clause_default (c_parser *parser, tree list, bool is_oacc)
{
  enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
  location_t loc = c_parser_peek_token (parser)->location;
  tree c;
  matching_parens parens;

  if (!parens.require_open (parser))
    return list;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      /* "none" is valid for both; "present" is OpenACC-only and
	 "shared" OpenMP-only.  Invalid kinds jump into the else arm
	 below to issue the dialect-appropriate diagnostic.  */
      switch (p[0])
	{
	case 'n':
	  if (strcmp ("none", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_NONE;
	  break;

	case 'p':
	  if (strcmp ("present", p) != 0 || !is_oacc)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_PRESENT;
	  break;

	case 's':
	  if (strcmp ("shared", p) != 0 || is_oacc)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_SHARED;
	  break;

	default:
	  goto invalid_kind;
	}

      c_parser_consume_token (parser);
    }
  else
    {
    invalid_kind:
      if (is_oacc)
	c_parser_error (parser, "expected %<none%> or %<present%>");
      else
	c_parser_error (parser, "expected %<none%> or %<shared%>");
    }
  parens.skip_until_found_close (parser);

  /* On any error above KIND is still UNSPECIFIED; build no clause.  */
  if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default");
  c = build_omp_clause (loc, OMP_CLAUSE_DEFAULT);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_DEFAULT_KIND (c) = kind;
  return c;
}
/* OpenMP 2.5:
   firstprivate ( variable-list )

   Thin wrapper: collect the parenthesized variable list into
   OMP_CLAUSE_FIRSTPRIVATE nodes prepended to LIST.  */

static tree
c_parser_omp_clause_firstprivate (c_parser *parser, tree list)
{
  tree clauses = c_parser_omp_var_list_parens (parser,
					       OMP_CLAUSE_FIRSTPRIVATE, list);
  return clauses;
}
/* OpenMP 3.1:
   final ( expression )

   Parse a final clause; on success an OMP_CLAUSE_FINAL node carrying
   the condition is prepended to LIST, otherwise LIST is returned
   unchanged.  */

static tree
c_parser_omp_clause_final (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;

  if (!c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_error (parser, "expected %<(%>");
      return list;
    }

  tree cond = c_parser_paren_condition (parser);

  check_no_duplicate_clause (list, OMP_CLAUSE_FINAL, "final");
  tree c = build_omp_clause (loc, OMP_CLAUSE_FINAL);
  OMP_CLAUSE_FINAL_EXPR (c) = cond;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenACC, OpenMP 2.5:
if ( expression )
OpenMP 4.5:
if ( directive-name-modifier : expression )
directive-name-modifier:
parallel | task | taskloop | target data | target | target update
| target enter data | target exit data */
static tree
c_parser_omp_clause_if (c_parser *parser, tree list, bool is_omp)
{
  location_t location = c_parser_peek_token (parser)->location;
  enum tree_code if_modifier = ERROR_MARK;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  /* OpenMP 4.5 allows a directive-name-modifier before the ':'.
     Tree codes double as modifier tags; ERROR_MARK means "none".  */
  if (is_omp && c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      /* N counts the tokens making up "modifier :" — 2 for a one-word
	 modifier, 3 for "target data" etc., 4 for "target enter data".
	 It decides whether a missing ':' is diagnosed below.  */
      int n = 2;

      if (strcmp (p, "parallel") == 0)
	if_modifier = OMP_PARALLEL;
      else if (strcmp (p, "task") == 0)
	if_modifier = OMP_TASK;
      else if (strcmp (p, "taskloop") == 0)
	if_modifier = OMP_TASKLOOP;
      else if (strcmp (p, "target") == 0)
	{
	  if_modifier = OMP_TARGET;
	  if (c_parser_peek_2nd_token (parser)->type == CPP_NAME)
	    {
	      p = IDENTIFIER_POINTER (c_parser_peek_2nd_token (parser)->value);
	      if (strcmp ("data", p) == 0)
		if_modifier = OMP_TARGET_DATA;
	      else if (strcmp ("update", p) == 0)
		if_modifier = OMP_TARGET_UPDATE;
	      else if (strcmp ("enter", p) == 0)
		if_modifier = OMP_TARGET_ENTER_DATA;
	      else if (strcmp ("exit", p) == 0)
		if_modifier = OMP_TARGET_EXIT_DATA;
	      if (if_modifier != OMP_TARGET)
		{
		  n = 3;
		  c_parser_consume_token (parser);
		}
	      else
		{
		  /* Bare "if (target ...)" with an unexpected second
		     word: diagnose and drop the modifier.  */
		  location_t loc = c_parser_peek_2nd_token (parser)->location;
		  error_at (loc, "expected %<data%>, %<update%>, %<enter%> "
			    "or %<exit%>");
		  if_modifier = ERROR_MARK;
		}
	      /* "target enter"/"target exit" still needs "data".  */
	      if (if_modifier == OMP_TARGET_ENTER_DATA
		  || if_modifier == OMP_TARGET_EXIT_DATA)
		{
		  if (c_parser_peek_2nd_token (parser)->type == CPP_NAME)
		    {
		      p = IDENTIFIER_POINTER
			    (c_parser_peek_2nd_token (parser)->value);
		      if (strcmp ("data", p) == 0)
			n = 4;
		    }
		  if (n == 4)
		    c_parser_consume_token (parser);
		  else
		    {
		      location_t loc
			= c_parser_peek_2nd_token (parser)->location;
		      error_at (loc, "expected %<data%>");
		      if_modifier = ERROR_MARK;
		    }
		}
	    }
	}
      if (if_modifier != ERROR_MARK)
	{
	  if (c_parser_peek_2nd_token (parser)->type == CPP_COLON)
	    {
	      /* Consume the final modifier word and the ':'.  */
	      c_parser_consume_token (parser);
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      /* Without ':' a one-word "modifier" is just the start of
		 the condition expression; only multi-word forms (n > 2)
		 are definitely a malformed modifier.  */
	      if (n > 2)
		{
		  location_t loc = c_parser_peek_2nd_token (parser)->location;
		  error_at (loc, "expected %<:%>");
		}
	      if_modifier = ERROR_MARK;
	    }
	}
    }

  tree t = c_parser_condition (parser), c;
  parens.skip_until_found_close (parser);

  /* Check the new clause against the existing ones: at most one 'if'
     per modifier, and modified/unmodified clauses cannot be mixed.  */
  for (c = list; c ; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IF)
      {
	if (if_modifier != ERROR_MARK
	    && OMP_CLAUSE_IF_MODIFIER (c) == if_modifier)
	  {
	    const char *p = NULL;
	    switch (if_modifier)
	      {
	      case OMP_PARALLEL: p = "parallel"; break;
	      case OMP_TASK: p = "task"; break;
	      case OMP_TASKLOOP: p = "taskloop"; break;
	      case OMP_TARGET_DATA: p = "target data"; break;
	      case OMP_TARGET: p = "target"; break;
	      case OMP_TARGET_UPDATE: p = "target update"; break;
	      case OMP_TARGET_ENTER_DATA: p = "enter data"; break;
	      case OMP_TARGET_EXIT_DATA: p = "exit data"; break;
	      default: gcc_unreachable ();
	      }
	    error_at (location, "too many %<if%> clauses with %qs modifier",
		      p);
	    return list;
	  }
	else if (OMP_CLAUSE_IF_MODIFIER (c) == if_modifier)
	  {
	    /* Both modifiers are ERROR_MARK here, i.e. two unmodified
	       'if' clauses.  */
	    if (!is_omp)
	      error_at (location, "too many %<if%> clauses");
	    else
	      error_at (location, "too many %<if%> clauses without modifier");
	    return list;
	  }
	else if (if_modifier == ERROR_MARK
		 || OMP_CLAUSE_IF_MODIFIER (c) == ERROR_MARK)
	  {
	    error_at (location, "if any %<if%> clause has modifier, then all "
			        "%<if%> clauses have to use modifier");
	    return list;
	  }
      }

  c = build_omp_clause (location, OMP_CLAUSE_IF);
  OMP_CLAUSE_IF_MODIFIER (c) = if_modifier;
  OMP_CLAUSE_IF_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   lastprivate ( variable-list )

   Thin wrapper: collect the parenthesized variable list into
   OMP_CLAUSE_LASTPRIVATE nodes prepended to LIST.  */

static tree
c_parser_omp_clause_lastprivate (c_parser *parser, tree list)
{
  tree clauses = c_parser_omp_var_list_parens (parser,
					       OMP_CLAUSE_LASTPRIVATE, list);
  return clauses;
}
/* OpenMP 3.1:
   mergeable

   Argument-less clause: just prepend an OMP_CLAUSE_MERGEABLE node.  */

static tree
c_parser_omp_clause_mergeable (c_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  /* FIXME: Should we allow duplicates?  */
  check_no_duplicate_clause (list, OMP_CLAUSE_MERGEABLE, "mergeable");

  location_t loc = c_parser_peek_token (parser)->location;
  tree c = build_omp_clause (loc, OMP_CLAUSE_MERGEABLE);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   nowait

   Argument-less clause: just prepend an OMP_CLAUSE_NOWAIT node.  */

static tree
c_parser_omp_clause_nowait (c_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait");

  location_t loc = c_parser_peek_token (parser)->location;
  tree c = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   num_threads ( expression )

   Parse a num_threads clause.  The expression must be integral; when
   it is a non-positive constant, warn and substitute 1.  On success an
   OMP_CLAUSE_NUM_THREADS node is prepended to LIST.  */

static tree
c_parser_omp_clause_num_threads (c_parser *parser, tree list)
{
  location_t num_threads_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  location_t expr_loc = c_parser_peek_token (parser)->location;
  c_expr expr = c_parser_expression (parser);
  expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
  tree t = c_fully_fold (expr.value, false, NULL);
  parens.skip_until_found_close (parser);

  if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
    {
      c_parser_error (parser, "expected integer expression");
      return list;
    }

  /* Attempt to statically determine when the number isn't positive.  */
  tree cond = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			       build_int_cst (TREE_TYPE (t), 0));
  protected_set_expr_location (cond, expr_loc);
  if (cond == boolean_true_node)
    {
      warning_at (expr_loc, 0,
		  "%<num_threads%> value must be positive");
      t = integer_one_node;
    }

  check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads");

  tree c = build_omp_clause (num_threads_loc, OMP_CLAUSE_NUM_THREADS);
  OMP_CLAUSE_NUM_THREADS_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 4.5:
   num_tasks ( expression )

   Parse a num_tasks clause.  The expression must be integral; when it
   is a non-positive constant, warn and substitute 1.  On success an
   OMP_CLAUSE_NUM_TASKS node is prepended to LIST.  */

static tree
c_parser_omp_clause_num_tasks (c_parser *parser, tree list)
{
  location_t num_tasks_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expression (parser);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);
      parens.skip_until_found_close (parser);

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}

      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			   build_int_cst (TREE_TYPE (t), 0));
      /* Use protected_set_expr_location for consistency with
	 c_parser_omp_clause_num_threads; it performs the same
	 CAN_HAVE_LOCATION_P check this function used to open-code.  */
      protected_set_expr_location (c, expr_loc);
      if (c == boolean_true_node)
	{
	  warning_at (expr_loc, 0, "%<num_tasks%> value must be positive");
	  t = integer_one_node;
	}

      check_no_duplicate_clause (list, OMP_CLAUSE_NUM_TASKS, "num_tasks");

      c = build_omp_clause (num_tasks_loc, OMP_CLAUSE_NUM_TASKS);
      OMP_CLAUSE_NUM_TASKS_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}
/* OpenMP 4.5:
   grainsize ( expression )

   Parse a grainsize clause.  The expression must be integral; when it
   is a non-positive constant, warn and substitute 1.  On success an
   OMP_CLAUSE_GRAINSIZE node is prepended to LIST.  */

static tree
c_parser_omp_clause_grainsize (c_parser *parser, tree list)
{
  location_t grainsize_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expression (parser);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);
      parens.skip_until_found_close (parser);

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}

      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			   build_int_cst (TREE_TYPE (t), 0));
      /* Use protected_set_expr_location for consistency with
	 c_parser_omp_clause_num_threads; it performs the same
	 CAN_HAVE_LOCATION_P check this function used to open-code.  */
      protected_set_expr_location (c, expr_loc);
      if (c == boolean_true_node)
	{
	  warning_at (expr_loc, 0, "%<grainsize%> value must be positive");
	  t = integer_one_node;
	}

      check_no_duplicate_clause (list, OMP_CLAUSE_GRAINSIZE, "grainsize");

      c = build_omp_clause (grainsize_loc, OMP_CLAUSE_GRAINSIZE);
      OMP_CLAUSE_GRAINSIZE_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}
/* OpenMP 4.5:
   priority ( expression )

   Parse a priority clause.  The expression must be integral; when it
   is a negative constant, warn and substitute 1.  On success an
   OMP_CLAUSE_PRIORITY node is prepended to LIST.  */

static tree
c_parser_omp_clause_priority (c_parser *parser, tree list)
{
  location_t priority_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expression (parser);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);
      parens.skip_until_found_close (parser);

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}

      /* Attempt to statically determine when the number isn't
	 non-negative.  */
      c = fold_build2_loc (expr_loc, LT_EXPR, boolean_type_node, t,
			   build_int_cst (TREE_TYPE (t), 0));
      /* Use protected_set_expr_location for consistency with
	 c_parser_omp_clause_num_threads; it performs the same
	 CAN_HAVE_LOCATION_P check this function used to open-code.  */
      protected_set_expr_location (c, expr_loc);
      if (c == boolean_true_node)
	{
	  warning_at (expr_loc, 0, "%<priority%> value must be non-negative");
	  t = integer_one_node;
	}

      check_no_duplicate_clause (list, OMP_CLAUSE_PRIORITY, "priority");

      c = build_omp_clause (priority_loc, OMP_CLAUSE_PRIORITY);
      OMP_CLAUSE_PRIORITY_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}
/* OpenMP 4.5:
   hint ( expression )

   Parse a hint clause.  The expression must be integral; on success an
   OMP_CLAUSE_HINT node is prepended to LIST.  */

static tree
c_parser_omp_clause_hint (c_parser *parser, tree list)
{
  location_t hint_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  location_t expr_loc = c_parser_peek_token (parser)->location;
  c_expr expr = c_parser_expression (parser);
  expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
  tree t = c_fully_fold (expr.value, false, NULL);
  parens.skip_until_found_close (parser);

  if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
    {
      c_parser_error (parser, "expected integer expression");
      return list;
    }

  check_no_duplicate_clause (list, OMP_CLAUSE_HINT, "hint");

  tree c = build_omp_clause (hint_loc, OMP_CLAUSE_HINT);
  OMP_CLAUSE_HINT_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 4.5:
defaultmap ( tofrom : scalar ) */
static tree
c_parser_omp_clause_defaultmap (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree c;
  const char *p;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;
  /* OpenMP 4.5 only accepts the literal form "defaultmap (tofrom :
     scalar)"; anything else is diagnosed and skipped past ')'.  */
  if (!c_parser_next_token_is (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected %<tofrom%>");
      goto out_err;
    }
  p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
  if (strcmp (p, "tofrom") != 0)
    {
      c_parser_error (parser, "expected %<tofrom%>");
      goto out_err;
    }
  c_parser_consume_token (parser);
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
    goto out_err;
  if (!c_parser_next_token_is (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected %<scalar%>");
      goto out_err;
    }
  p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
  if (strcmp (p, "scalar") != 0)
    {
      c_parser_error (parser, "expected %<scalar%>");
      goto out_err;
    }
  c_parser_consume_token (parser);
  parens.skip_until_found_close (parser);

  check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULTMAP, "defaultmap");
  /* The clause carries no operands: its presence alone is the
     information.  */
  c = build_omp_clause (loc, OMP_CLAUSE_DEFAULTMAP);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;

 out_err:
  /* Shared error exit: resynchronize at the closing paren and return
     LIST unchanged.  */
  parens.skip_until_found_close (parser);
  return list;
}
/* OpenACC 2.0:
   use_device ( variable-list )

   OpenMP 4.5:
   use_device_ptr ( variable-list )

   Thin wrapper: collect the parenthesized variable list into
   OMP_CLAUSE_USE_DEVICE_PTR nodes prepended to LIST.  */

static tree
c_parser_omp_clause_use_device_ptr (c_parser *parser, tree list)
{
  tree clauses = c_parser_omp_var_list_parens (parser,
					       OMP_CLAUSE_USE_DEVICE_PTR,
					       list);
  return clauses;
}
/* OpenMP 4.5:
   is_device_ptr ( variable-list )

   Thin wrapper: collect the parenthesized variable list into
   OMP_CLAUSE_IS_DEVICE_PTR nodes prepended to LIST.  */

static tree
c_parser_omp_clause_is_device_ptr (c_parser *parser, tree list)
{
  tree clauses = c_parser_omp_var_list_parens (parser,
					       OMP_CLAUSE_IS_DEVICE_PTR,
					       list);
  return clauses;
}
/* OpenACC:
   num_gangs ( expression )
   num_workers ( expression )
   vector_length ( expression )

   Common parser for single-integer OpenACC clauses of kind CODE.  The
   expression must be integral; when it is a non-positive constant,
   warn and substitute 1.  On success a clause of kind CODE is
   prepended to LIST.  */

static tree
c_parser_oacc_single_int_clause (c_parser *parser, omp_clause_code code,
				 tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  location_t expr_loc = c_parser_peek_token (parser)->location;
  c_expr expr = c_parser_expression (parser);
  expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
  tree t = c_fully_fold (expr.value, false, NULL);
  parens.skip_until_found_close (parser);

  if (t == error_mark_node)
    return list;
  if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
    {
      error_at (expr_loc, "%qs expression must be integral",
		omp_clause_code_name[code]);
      return list;
    }

  /* Attempt to statically determine when the number isn't positive.  */
  tree cond = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			       build_int_cst (TREE_TYPE (t), 0));
  protected_set_expr_location (cond, expr_loc);
  if (cond == boolean_true_node)
    {
      warning_at (expr_loc, 0,
		  "%qs value must be positive",
		  omp_clause_code_name[code]);
      t = integer_one_node;
    }

  check_no_duplicate_clause (list, code, omp_clause_code_name[code]);

  tree c = build_omp_clause (loc, code);
  OMP_CLAUSE_OPERAND (c, 0) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenACC:
gang [( gang-arg-list )]
worker [( [num:] int-expr )]
vector [( [length:] int-expr )]
where gang-arg is one of:
[num:] int-expr
static: size-expr
and size-expr may be:
*
int-expr
*/
static tree
c_parser_oacc_shape_clause (c_parser *parser, omp_clause_code kind,
			    const char *str, tree list)
{
  /* The keyword introducing the optional numeric argument: "num" for
     gang/worker, "length" for vector.  */
  const char *id = "num";
  /* ops[0] is the num/length argument, ops[1] the gang "static:" one.  */
  tree ops[2] = { NULL_TREE, NULL_TREE }, c;
  location_t loc = c_parser_peek_token (parser)->location;

  if (kind == OMP_CLAUSE_VECTOR)
    id = "length";

  /* The whole argument list is optional.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser);

      do
	{
	  c_token *next = c_parser_peek_token (parser);
	  int idx = 0;

	  /* Gang static argument.  */
	  if (kind == OMP_CLAUSE_GANG
	      && c_parser_next_token_is_keyword (parser, RID_STATIC))
	    {
	      c_parser_consume_token (parser);

	      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
		goto cleanup_error;

	      idx = 1;
	      if (ops[idx] != NULL_TREE)
		{
		  c_parser_error (parser, "too many %<static%> arguments");
		  goto cleanup_error;
		}

	      /* Check for the '*' argument.  */
	      if (c_parser_next_token_is (parser, CPP_MULT)
		  && (c_parser_peek_2nd_token (parser)->type == CPP_COMMA
		      || c_parser_peek_2nd_token (parser)->type
			 == CPP_CLOSE_PAREN))
		{
		  c_parser_consume_token (parser);
		  /* '*' is recorded as the sentinel value -1.  */
		  ops[idx] = integer_minus_one_node;

		  if (c_parser_next_token_is (parser, CPP_COMMA))
		    {
		      c_parser_consume_token (parser);
		      continue;
		    }
		  else
		    break;
		}
	    }
	  /* Worker num: argument and vector length: arguments.  */
	  else if (c_parser_next_token_is (parser, CPP_NAME)
		   && strcmp (id, IDENTIFIER_POINTER (next->value)) == 0
		   && c_parser_peek_2nd_token (parser)->type == CPP_COLON)
	    {
	      c_parser_consume_token (parser);  /* id  */
	      c_parser_consume_token (parser);  /* ':'  */
	    }

	  /* Now collect the actual argument.  */
	  if (ops[idx] != NULL_TREE)
	    {
	      c_parser_error (parser, "unexpected argument");
	      goto cleanup_error;
	    }

	  location_t expr_loc = c_parser_peek_token (parser)->location;
	  c_expr cexpr = c_parser_expr_no_commas (parser, NULL);
	  cexpr = convert_lvalue_to_rvalue (expr_loc, cexpr, false, true);
	  tree expr = cexpr.value;
	  if (expr == error_mark_node)
	    goto cleanup_error;

	  expr = c_fully_fold (expr, false, NULL);

	  /* Attempt to statically determine when the number isn't a
	     positive integer.  */

	  if (!INTEGRAL_TYPE_P (TREE_TYPE (expr)))
	    {
	      c_parser_error (parser, "expected integer expression");
	      /* NOTE(review): unlike the other error paths this returns
		 without skipping to the closing paren — looks
		 intentional but verify the recovery behavior.  */
	      return list;
	    }

	  tree c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, expr,
				    build_int_cst (TREE_TYPE (expr), 0));
	  if (c == boolean_true_node)
	    {
	      warning_at (loc, 0,
			  "%qs value must be positive", str);
	      expr = integer_one_node;
	    }

	  ops[idx] = expr;

	  /* Only gang may take a second (comma-separated) argument.  */
	  if (kind == OMP_CLAUSE_GANG
	      && c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      c_parser_consume_token (parser);
	      continue;
	    }

	  break;
	}
      while (1);

      if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	goto cleanup_error;
    }

  check_no_duplicate_clause (list, kind, str);

  c = build_omp_clause (loc, kind);

  if (ops[1])
    OMP_CLAUSE_OPERAND (c, 1) = ops[1];

  OMP_CLAUSE_OPERAND (c, 0) = ops[0];
  OMP_CLAUSE_CHAIN (c) = list;

  return c;

 cleanup_error:
  /* Resynchronize at the closing paren; no clause is added.  */
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
  return list;
}
/* OpenACC:
   auto
   independent
   nohost
   seq

   Common parser for the argument-less OpenACC clauses above: just
   prepend a clause node of kind CODE to LIST.  */

static tree
c_parser_oacc_simple_clause (c_parser *parser, enum omp_clause_code code,
			     tree list)
{
  check_no_duplicate_clause (list, code, omp_clause_code_name[code]);

  location_t loc = c_parser_peek_token (parser)->location;
  tree c = build_omp_clause (loc, code);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenACC:
   async [( int-expr )]

   Parse an "async" clause.  The parenthesized queue expression is
   optional; without one the GOMP_ASYNC_NOVAL placeholder queue is
   recorded.  Returns the new clause chained onto LIST, or LIST
   unchanged on a parse error.  */

static tree
c_parser_oacc_clause_async (c_parser *parser, tree list)
{
  tree c, t;
  location_t loc = c_parser_peek_token (parser)->location;

  /* Default argument when no parenthesized expression follows.  */
  t = build_int_cst (integer_type_node, GOMP_ASYNC_NOVAL);

  if (c_parser_peek_token (parser)->type == CPP_OPEN_PAREN)
    {
      c_parser_consume_token (parser);

      t = c_parser_expression (parser).value;
      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	c_parser_error (parser, "expected integer expression");
      else if (t == error_mark_node
	       || !c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	return list;
    }
  else
    /* Only the default constant is folded here; a user expression is
       used as parsed.  */
    t = c_fully_fold (t, false, NULL);

  check_no_duplicate_clause (list, OMP_CLAUSE_ASYNC, "async");

  c = build_omp_clause (loc, OMP_CLAUSE_ASYNC);
  OMP_CLAUSE_ASYNC_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  list = c;

  return list;
}
/* OpenACC 2.0:
   tile ( size-expr-list )

   Parse a "tile" clause.  Each size expression must be a positive
   integral constant, or '*' (recorded as integer_zero_node).  Returns
   the new clause chained onto LIST, or LIST unchanged on error.  */

static tree
c_parser_oacc_clause_tile (c_parser *parser, tree list)
{
  tree c, expr = error_mark_node;
  location_t loc;
  tree tile = NULL_TREE;

  /* tile may appear at most once and not together with collapse.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_TILE, "tile");
  check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse");

  loc = c_parser_peek_token (parser)->location;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;

  do
    {
      /* After the first element a comma separator is required.  */
      if (tile && !c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	return list;

      if (c_parser_next_token_is (parser, CPP_MULT)
	  && (c_parser_peek_2nd_token (parser)->type == CPP_COMMA
	      || c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_PAREN))
	{
	  /* '*' stands for an implementation-chosen size; store zero.  */
	  c_parser_consume_token (parser);
	  expr = integer_zero_node;
	}
      else
	{
	  location_t expr_loc = c_parser_peek_token (parser)->location;
	  c_expr cexpr = c_parser_expr_no_commas (parser, NULL);
	  cexpr = convert_lvalue_to_rvalue (expr_loc, cexpr, false, true);
	  expr = cexpr.value;

	  if (expr == error_mark_node)
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	      return list;
	    }

	  expr = c_fully_fold (expr, false, NULL);

	  /* Reject anything that is not a positive integral constant;
	     recover by substituting zero ('*').  */
	  if (!INTEGRAL_TYPE_P (TREE_TYPE (expr))
	      || !tree_fits_shwi_p (expr)
	      || tree_to_shwi (expr) <= 0)
	    {
	      error_at (expr_loc, "%<tile%> argument needs positive"
			" integral constant");
	      expr = integer_zero_node;
	    }
	}

      /* Collect in reverse; nreverse below restores source order.  */
      tile = tree_cons (NULL_TREE, expr, tile);
    }
  while (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN));

  /* Consume the trailing ')'.  */
  c_parser_consume_token (parser);

  c = build_omp_clause (loc, OMP_CLAUSE_TILE);
  tile = nreverse (tile);
  OMP_CLAUSE_TILE_LIST (c) = tile;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenACC:
   wait ( int-expr-list )

   Parse a "wait" clause.  The argument list is optional; without one
   there is nothing further to consume.  */

static tree
c_parser_oacc_clause_wait (c_parser *parser, tree list)
{
  c_token *tok = c_parser_peek_token (parser);

  if (tok->type != CPP_OPEN_PAREN)
    return list;

  return c_parser_oacc_wait_list (parser, tok->location, list);
}
/* OpenMP 2.5:
   ordered

   OpenMP 4.5:
   ordered ( constant-expression )

   Parse an "ordered" clause with an optional parenthesized argument
   that must be a positive constant integer fitting in an int.  */

static tree
c_parser_omp_clause_ordered (c_parser *parser, tree list)
{
  check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered");

  tree c, num = NULL_TREE;
  HOST_WIDE_INT n;
  location_t loc = c_parser_peek_token (parser)->location;

  /* The parenthesized argument (OpenMP 4.5) is optional.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      matching_parens parens;
      parens.consume_open (parser);
      num = c_parser_expr_no_commas (parser, NULL).value;
      parens.skip_until_found_close (parser);
    }
  if (num == error_mark_node)
    return list;
  if (num)
    {
      mark_exp_read (num);
      num = c_fully_fold (num, false, NULL);
      /* Require a positive constant that also fits in an int.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (num))
	  || !tree_fits_shwi_p (num)
	  || (n = tree_to_shwi (num)) <= 0
	  || (int) n != n)
	{
	  error_at (loc, "ordered argument needs positive "
			 "constant integer expression");
	  return list;
	}
    }
  c = build_omp_clause (loc, OMP_CLAUSE_ORDERED);
  OMP_CLAUSE_ORDERED_EXPR (c) = num;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   private ( variable-list )

   Parse a "private" clause: a plain parenthesized variable list.  */

static tree
c_parser_omp_clause_private (c_parser *parser, tree list)
{
  tree clauses
    = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_PRIVATE, list);
  return clauses;
}
/* OpenMP 2.5:
   reduction ( reduction-operator : variable-list )

   reduction-operator:
     One of: + * - & ^ | && ||

   OpenMP 3.1:

   reduction-operator:
     One of: + * - & ^ | && || max min

   OpenMP 4.0:

   reduction-operator:
     One of: + * - & ^ | && ||
     identifier

   Parse a "reduction" clause.  The operator token is mapped to a tree
   code; a bare identifier (other than "min"/"max") names a
   user-declared reduction, looked up per variable type below.  */

static tree
c_parser_omp_clause_reduction (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      enum tree_code code = ERROR_MARK;
      tree reduc_id = NULL_TREE;

      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_PLUS:
	  code = PLUS_EXPR;
	  break;
	case CPP_MULT:
	  code = MULT_EXPR;
	  break;
	case CPP_MINUS:
	  code = MINUS_EXPR;
	  break;
	case CPP_AND:
	  code = BIT_AND_EXPR;
	  break;
	case CPP_XOR:
	  code = BIT_XOR_EXPR;
	  break;
	case CPP_OR:
	  code = BIT_IOR_EXPR;
	  break;
	case CPP_AND_AND:
	  code = TRUTH_ANDIF_EXPR;
	  break;
	case CPP_OR_OR:
	  code = TRUTH_ORIF_EXPR;
	  break;
	case CPP_NAME:
	  {
	    const char *p
	      = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
	    if (strcmp (p, "min") == 0)
	      {
		code = MIN_EXPR;
		break;
	      }
	    if (strcmp (p, "max") == 0)
	      {
		code = MAX_EXPR;
		break;
	      }
	    /* Anything else is a user-defined reduction identifier;
	       code stays ERROR_MARK.  */
	    reduc_id = c_parser_peek_token (parser)->value;
	    break;
	  }
	default:
	  c_parser_error (parser,
			  "expected %<+%>, %<*%>, %<-%>, %<&%>, "
			  "%<^%>, %<|%>, %<&&%>, %<||%> or identifier");
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
	  return list;
	}
      c_parser_consume_token (parser);
      reduc_id = c_omp_reduction_id (code, reduc_id);
      if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	{
	  tree nl, c;

	  nl = c_parser_omp_variable_list (parser, clause_loc,
					   OMP_CLAUSE_REDUCTION, list);
	  for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
	    {
	      tree d = OMP_CLAUSE_DECL (c), type;
	      if (TREE_CODE (d) != TREE_LIST)
		type = TREE_TYPE (d);
	      else
		{
		  /* For a TREE_LIST decl, peel one pointer/array level
		     per list node to reach the underlying type.  */
		  int cnt = 0;
		  tree t;
		  for (t = d; TREE_CODE (t) == TREE_LIST; t = TREE_CHAIN (t))
		    cnt++;
		  type = TREE_TYPE (t);
		  while (cnt > 0)
		    {
		      if (TREE_CODE (type) != POINTER_TYPE
			  && TREE_CODE (type) != ARRAY_TYPE)
			break;
		      type = TREE_TYPE (type);
		      cnt--;
		    }
		}
	      while (TREE_CODE (type) == ARRAY_TYPE)
		type = TREE_TYPE (type);
	      OMP_CLAUSE_REDUCTION_CODE (c) = code;
	      /* Identifier reductions (ERROR_MARK) and non-arithmetic
		 element types need a user-declared reduction; record
		 the lookup result as the placeholder.  */
	      if (code == ERROR_MARK
		  || !(INTEGRAL_TYPE_P (type)
		       || TREE_CODE (type) == REAL_TYPE
		       || TREE_CODE (type) == COMPLEX_TYPE))
		OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		  = c_omp_reduction_lookup (reduc_id,
					    TYPE_MAIN_VARIANT (type));
	    }
	  list = nl;
	}
      parens.skip_until_found_close (parser);
    }
  return list;
}
/* OpenMP 2.5:
   schedule ( schedule-kind )
   schedule ( schedule-kind , expression )

   schedule-kind:
     static | dynamic | guided | runtime | auto

   OpenMP 4.5:
   schedule ( schedule-modifier : schedule-kind )
   schedule ( schedule-modifier [ , schedule-modifier ] : schedule-kind , expression )

   schedule-modifier:
     simd
     monotonic
     nonmonotonic

   Parse a "schedule" clause: optional modifiers, a kind, and an
   optional chunk-size expression.  */

static tree
c_parser_omp_clause_schedule (c_parser *parser, tree list)
{
  tree c, t;
  location_t loc = c_parser_peek_token (parser)->location;
  int modifiers = 0, nmodifiers = 0;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  c = build_omp_clause (loc, OMP_CLAUSE_SCHEDULE);

  /* Collect up to two modifiers; a name that is not a known modifier
     ends the loop and is taken as the schedule kind.  Only the first
     modifier may be followed by a comma; otherwise ':' is required.  */
  while (c_parser_next_token_is (parser, CPP_NAME))
    {
      tree kind = c_parser_peek_token (parser)->value;
      const char *p = IDENTIFIER_POINTER (kind);
      if (strcmp ("simd", p) == 0)
	OMP_CLAUSE_SCHEDULE_SIMD (c) = 1;
      else if (strcmp ("monotonic", p) == 0)
	modifiers |= OMP_CLAUSE_SCHEDULE_MONOTONIC;
      else if (strcmp ("nonmonotonic", p) == 0)
	modifiers |= OMP_CLAUSE_SCHEDULE_NONMONOTONIC;
      else
	break;
      c_parser_consume_token (parser);
      if (nmodifiers++ == 0
	  && c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	{
	  c_parser_require (parser, CPP_COLON, "expected %<:%>");
	  break;
	}
    }

  /* monotonic and nonmonotonic are mutually exclusive.  */
  if ((modifiers & (OMP_CLAUSE_SCHEDULE_MONOTONIC
		    | OMP_CLAUSE_SCHEDULE_NONMONOTONIC))
      == (OMP_CLAUSE_SCHEDULE_MONOTONIC
	  | OMP_CLAUSE_SCHEDULE_NONMONOTONIC))
    {
      error_at (loc, "both %<monotonic%> and %<nonmonotonic%> modifiers "
		     "specified");
      modifiers = 0;
    }

  /* The kind: "static" and "auto" arrive as keyword tokens, the other
     kinds as plain names.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      tree kind = c_parser_peek_token (parser)->value;
      const char *p = IDENTIFIER_POINTER (kind);

      switch (p[0])
	{
	case 'd':
	  if (strcmp ("dynamic", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
	  break;

	case 'g':
	  if (strcmp ("guided", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
	  break;

	case 'r':
	  if (strcmp ("runtime", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
	  break;

	default:
	  goto invalid_kind;
	}
    }
  else if (c_parser_next_token_is_keyword (parser, RID_STATIC))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
  else if (c_parser_next_token_is_keyword (parser, RID_AUTO))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO;
  else
    goto invalid_kind;

  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_COMMA))
    {
      location_t here;
      c_parser_consume_token (parser);

      here = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (here, expr, false, true);
      t = expr.value;
      t = c_fully_fold (t, false, NULL);

      /* runtime and auto take no chunk_size parameter.  */
      if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME)
	error_at (here, "schedule %<runtime%> does not take "
		  "a %<chunk_size%> parameter");
      else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO)
	error_at (here,
		  "schedule %<auto%> does not take "
		  "a %<chunk_size%> parameter");
      else if (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE)
	{
	  /* Attempt to statically determine when the number isn't
	     positive.  */
	  tree s = fold_build2_loc (loc, LE_EXPR, boolean_type_node, t,
				    build_int_cst (TREE_TYPE (t), 0));
	  protected_set_expr_location (s, loc);
	  if (s == boolean_true_node)
	    {
	      warning_at (loc, 0,
			  "chunk size value must be positive");
	      t = integer_one_node;
	    }
	  OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t;
	}
      else
	c_parser_error (parser, "expected integer expression");

      parens.skip_until_found_close (parser);
    }
  else
    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
			       "expected %<,%> or %<)%>");

  /* OR the parsed modifiers into the stored kind.  */
  OMP_CLAUSE_SCHEDULE_KIND (c)
    = (enum omp_clause_schedule_kind)
      (OMP_CLAUSE_SCHEDULE_KIND (c) | modifiers);

  check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule");
  OMP_CLAUSE_CHAIN (c) = list;
  return c;

 invalid_kind:
  c_parser_error (parser, "invalid schedule kind");
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
  return list;
}
/* OpenMP 2.5:
   shared ( variable-list )

   Parse a "shared" clause: a plain parenthesized variable list.  */

static tree
c_parser_omp_clause_shared (c_parser *parser, tree list)
{
  tree clauses
    = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_SHARED, list);
  return clauses;
}
/* OpenMP 3.0:
   untied

   Build an argument-less "untied" clause chained onto LIST.  */

static tree
c_parser_omp_clause_untied (c_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;

  /* FIXME: Should we allow duplicates?  */
  check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied");

  tree clause = build_omp_clause (loc, OMP_CLAUSE_UNTIED);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 4.0:
   inbranch
   notinbranch

   Build an argument-less clause CODE chained onto LIST, diagnosing a
   duplicate occurrence first.  */

static tree
c_parser_omp_clause_branch (c_parser *parser ATTRIBUTE_UNUSED,
			    enum omp_clause_code code, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;

  check_no_duplicate_clause (list, code, omp_clause_code_name[code]);

  tree clause = build_omp_clause (loc, code);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 4.0:
   parallel
   for
   sections
   taskgroup

   Build an argument-less cancellation-kind clause CODE chained onto
   LIST.  No duplicate check is performed here.  */

static tree
c_parser_omp_clause_cancelkind (c_parser *parser ATTRIBUTE_UNUSED,
				enum omp_clause_code code, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;

  tree clause = build_omp_clause (loc, code);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 4.5:
   nogroup

   Build an argument-less "nogroup" clause chained onto LIST.  */

static tree
c_parser_omp_clause_nogroup (c_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;

  check_no_duplicate_clause (list, OMP_CLAUSE_NOGROUP, "nogroup");

  tree clause = build_omp_clause (loc, OMP_CLAUSE_NOGROUP);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 4.5:
   simd
   threads

   Build an argument-less ordered-kind clause CODE chained onto LIST,
   diagnosing a duplicate occurrence first.  */

static tree
c_parser_omp_clause_orderedkind (c_parser *parser ATTRIBUTE_UNUSED,
				 enum omp_clause_code code, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;

  check_no_duplicate_clause (list, code, omp_clause_code_name[code]);

  tree clause = build_omp_clause (loc, code);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 4.0:
   num_teams ( expression )

   Parse a "num_teams" clause.  The argument must be an integer
   expression; when it can be statically shown non-positive it is
   replaced by 1 with a warning.  */

static tree
c_parser_omp_clause_num_teams (c_parser *parser, tree list)
{
  location_t num_teams_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expression (parser);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);

      parens.skip_until_found_close (parser);

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}

      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			   build_int_cst (TREE_TYPE (t), 0));
      protected_set_expr_location (c, expr_loc);
      if (c == boolean_true_node)
	{
	  warning_at (expr_loc, 0, "%<num_teams%> value must be positive");
	  t = integer_one_node;
	}

      check_no_duplicate_clause (list, OMP_CLAUSE_NUM_TEAMS, "num_teams");

      c = build_omp_clause (num_teams_loc, OMP_CLAUSE_NUM_TEAMS);
      OMP_CLAUSE_NUM_TEAMS_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}
/* OpenMP 4.0:
   thread_limit ( expression )

   Parse a "thread_limit" clause.  Mirrors the num_teams handling: the
   argument must be an integer expression, and a statically-known
   non-positive value is replaced by 1 with a warning.  */

static tree
c_parser_omp_clause_thread_limit (c_parser *parser, tree list)
{
  location_t num_thread_limit_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expression (parser);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);

      parens.skip_until_found_close (parser);

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}

      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			   build_int_cst (TREE_TYPE (t), 0));
      protected_set_expr_location (c, expr_loc);
      if (c == boolean_true_node)
	{
	  warning_at (expr_loc, 0, "%<thread_limit%> value must be positive");
	  t = integer_one_node;
	}

      check_no_duplicate_clause (list, OMP_CLAUSE_THREAD_LIMIT,
				 "thread_limit");

      c = build_omp_clause (num_thread_limit_loc, OMP_CLAUSE_THREAD_LIMIT);
      OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}
/* OpenMP 4.0:
   aligned ( variable-list )
   aligned ( variable-list : constant-expression )

   Parse an "aligned" clause.  The optional alignment must be a
   positive integer constant; an invalid one is recorded as NULL_TREE
   after the diagnostic.  */

static tree
c_parser_omp_clause_aligned (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  tree nl, c;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  nl = c_parser_omp_variable_list (parser, clause_loc,
				   OMP_CLAUSE_ALIGNED, list);

  /* Optional ": alignment" suffix.  */
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      c_parser_consume_token (parser);
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree alignment = expr.value;
      alignment = c_fully_fold (alignment, false, NULL);

      if (TREE_CODE (alignment) != INTEGER_CST
	  || !INTEGRAL_TYPE_P (TREE_TYPE (alignment))
	  || tree_int_cst_sgn (alignment) != 1)
	{
	  error_at (clause_loc, "%<aligned%> clause alignment expression must "
				"be positive constant integer expression");
	  alignment = NULL_TREE;
	}

      /* Attach the alignment to every clause just parsed.  */
      for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
	OMP_CLAUSE_ALIGNED_ALIGNMENT (c) = alignment;
    }

  parens.skip_until_found_close (parser);
  return nl;
}
/* OpenMP 4.0:
   linear ( variable-list )
   linear ( variable-list : expression )

   OpenMP 4.5:
   linear ( modifier ( variable-list ) )
   linear ( modifier ( variable-list ) : expression )

   Parse a "linear" clause.  Only the "val" modifier is recognized
   here, and only when followed by its own '(' variable list.  */

static tree
c_parser_omp_clause_linear (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  tree nl, c, step;
  enum omp_clause_linear_kind kind = OMP_CLAUSE_LINEAR_DEFAULT;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      c_token *tok = c_parser_peek_token (parser);
      const char *p = IDENTIFIER_POINTER (tok->value);
      if (strcmp ("val", p) == 0)
	kind = OMP_CLAUSE_LINEAR_VAL;
      /* Without a following '(' the name is a variable, not a
	 modifier.  */
      if (c_parser_peek_2nd_token (parser)->type != CPP_OPEN_PAREN)
	kind = OMP_CLAUSE_LINEAR_DEFAULT;
      if (kind != OMP_CLAUSE_LINEAR_DEFAULT)
	{
	  /* Consume the modifier and its '('.  */
	  c_parser_consume_token (parser);
	  c_parser_consume_token (parser);
	}
    }

  nl = c_parser_omp_variable_list (parser, clause_loc,
				   OMP_CLAUSE_LINEAR, list);

  /* The modifier form wraps the variable list in an extra ')'.  */
  if (kind != OMP_CLAUSE_LINEAR_DEFAULT)
    parens.skip_until_found_close (parser);

  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      c_parser_consume_token (parser);

      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expression (parser);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      step = expr.value;
      step = c_fully_fold (step, false, NULL);
      if (!INTEGRAL_TYPE_P (TREE_TYPE (step)))
	{
	  error_at (clause_loc, "%<linear%> clause step expression must "
				"be integral");
	  step = integer_one_node;
	}

    }
  else
    /* Default linear step is 1.  */
    step = integer_one_node;

  /* Attach step and kind to every clause just parsed.  */
  for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
    {
      OMP_CLAUSE_LINEAR_STEP (c) = step;
      OMP_CLAUSE_LINEAR_KIND (c) = kind;
    }

  parens.skip_until_found_close (parser);
  return nl;
}
/* OpenMP 4.0:
   safelen ( constant-expression )

   Parse a "safelen" clause.  The argument must be a positive integer
   constant; on failure LIST is returned unchanged.  */

static tree
c_parser_omp_clause_safelen (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  tree c, t;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  location_t expr_loc = c_parser_peek_token (parser)->location;
  c_expr expr = c_parser_expr_no_commas (parser, NULL);
  expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
  t = expr.value;
  t = c_fully_fold (t, false, NULL);

  if (TREE_CODE (t) != INTEGER_CST
      || !INTEGRAL_TYPE_P (TREE_TYPE (t))
      || tree_int_cst_sgn (t) != 1)
    {
      error_at (clause_loc, "%<safelen%> clause expression must "
			    "be positive constant integer expression");
      t = NULL_TREE;
    }

  parens.skip_until_found_close (parser);
  if (t == NULL_TREE || t == error_mark_node)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_SAFELEN, "safelen");

  c = build_omp_clause (clause_loc, OMP_CLAUSE_SAFELEN);
  OMP_CLAUSE_SAFELEN_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 4.0:
   simdlen ( constant-expression )

   Parse a "simdlen" clause.  Mirrors the safelen handling: the
   argument must be a positive integer constant.  */

static tree
c_parser_omp_clause_simdlen (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  tree c, t;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  location_t expr_loc = c_parser_peek_token (parser)->location;
  c_expr expr = c_parser_expr_no_commas (parser, NULL);
  expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
  t = expr.value;
  t = c_fully_fold (t, false, NULL);

  if (TREE_CODE (t) != INTEGER_CST
      || !INTEGRAL_TYPE_P (TREE_TYPE (t))
      || tree_int_cst_sgn (t) != 1)
    {
      error_at (clause_loc, "%<simdlen%> clause expression must "
			    "be positive constant integer expression");
      t = NULL_TREE;
    }

  parens.skip_until_found_close (parser);
  if (t == NULL_TREE || t == error_mark_node)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_SIMDLEN, "simdlen");

  c = build_omp_clause (clause_loc, OMP_CLAUSE_SIMDLEN);
  OMP_CLAUSE_SIMDLEN_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 4.5:
   vec:
     identifier [+/- integer]
     vec , identifier [+/- integer]

   Parse the "vec" argument of a depend(sink : vec) clause and build a
   single OMP_CLAUSE_DEPEND clause of kind SINK chained onto LIST.
   Returns LIST unchanged on error or when no valid element was
   collected.  */

static tree
c_parser_omp_clause_depend_sink (c_parser *parser, location_t clause_loc,
				 tree list)
{
  tree vec = NULL;

  if (c_parser_next_token_is_not (parser, CPP_NAME)
      || c_parser_peek_token (parser)->id_kind != C_ID_ID)
    {
      c_parser_error (parser, "expected identifier");
      return list;
    }

  while (c_parser_next_token_is (parser, CPP_NAME)
	 && c_parser_peek_token (parser)->id_kind == C_ID_ID)
    {
      tree t = lookup_name (c_parser_peek_token (parser)->value);
      tree addend = NULL;

      if (t == NULL_TREE)
	{
	  undeclared_variable (c_parser_peek_token (parser)->location,
			       c_parser_peek_token (parser)->value);
	  t = error_mark_node;
	}

      c_parser_consume_token (parser);

      bool neg = false;
      if (c_parser_next_token_is (parser, CPP_MINUS))
	neg = true;
      else if (!c_parser_next_token_is (parser, CPP_PLUS))
	{
	  /* No "+/- integer" offset: record a zero addend and skip the
	     number parsing below.  */
	  addend = integer_zero_node;
	  neg = false;
	  goto add_to_vector;
	}
      c_parser_consume_token (parser);

      if (c_parser_next_token_is_not (parser, CPP_NUMBER))
	{
	  c_parser_error (parser, "expected integer");
	  return list;
	}

      addend = c_parser_peek_token (parser)->value;
      if (TREE_CODE (addend) != INTEGER_CST)
	{
	  c_parser_error (parser, "expected integer");
	  return list;
	}
      c_parser_consume_token (parser);

    add_to_vector:
      if (t != error_mark_node)
	{
	  /* Each element is a TREE_LIST node: addend in the purpose
	     slot, decl in the value slot; negativity is flagged on the
	     node itself.  */
	  vec = tree_cons (addend, t, vec);
	  if (neg)
	    OMP_CLAUSE_DEPEND_SINK_NEGATIVE (vec) = 1;
	}

      if (c_parser_next_token_is_not (parser, CPP_COMMA))
	break;

      c_parser_consume_token (parser);
    }

  if (vec == NULL_TREE)
    return list;

  tree u = build_omp_clause (clause_loc, OMP_CLAUSE_DEPEND);
  OMP_CLAUSE_DEPEND_KIND (u) = OMP_CLAUSE_DEPEND_SINK;
  OMP_CLAUSE_DECL (u) = nreverse (vec);
  OMP_CLAUSE_CHAIN (u) = list;
  return u;
}
/* OpenMP 4.0:
   depend ( depend-kind: variable-list )

   depend-kind:
     in | out | inout

   OpenMP 4.5:
   depend ( source )

   depend ( sink : vec )

   Parse a "depend" clause and chain the resulting clause(s) onto
   LIST.  */

static tree
c_parser_omp_clause_depend (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_INOUT;
  tree nl, c;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp ("in", p) == 0)
	kind = OMP_CLAUSE_DEPEND_IN;
      else if (strcmp ("inout", p) == 0)
	kind = OMP_CLAUSE_DEPEND_INOUT;
      else if (strcmp ("out", p) == 0)
	kind = OMP_CLAUSE_DEPEND_OUT;
      else if (strcmp ("source", p) == 0)
	kind = OMP_CLAUSE_DEPEND_SOURCE;
      else if (strcmp ("sink", p) == 0)
	kind = OMP_CLAUSE_DEPEND_SINK;
      else
	goto invalid_kind;
    }
  else
    goto invalid_kind;

  c_parser_consume_token (parser);

  /* "depend (source)" has no ':' nor variable list.  */
  if (kind == OMP_CLAUSE_DEPEND_SOURCE)
    {
      c = build_omp_clause (clause_loc, OMP_CLAUSE_DEPEND);
      OMP_CLAUSE_DEPEND_KIND (c) = kind;
      OMP_CLAUSE_DECL (c) = NULL_TREE;
      OMP_CLAUSE_CHAIN (c) = list;
      parens.skip_until_found_close (parser);
      return c;
    }

  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
    goto resync_fail;

  if (kind == OMP_CLAUSE_DEPEND_SINK)
    /* The sink vector has its own dedicated parser.  */
    nl = c_parser_omp_clause_depend_sink (parser, clause_loc, list);
  else
    {
      nl = c_parser_omp_variable_list (parser, clause_loc,
				       OMP_CLAUSE_DEPEND, list);

      for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
	OMP_CLAUSE_DEPEND_KIND (c) = kind;
    }

  parens.skip_until_found_close (parser);
  return nl;

 invalid_kind:
  c_parser_error (parser, "invalid depend kind");
 resync_fail:
  parens.skip_until_found_close (parser);
  return list;
}
/* OpenMP 4.0:
   map ( map-kind: variable-list )
   map ( variable-list )

   map-kind:
     alloc | to | from | tofrom

   OpenMP 4.5:
   map-kind:
     alloc | to | from | tofrom | release | delete

   map ( always [,] map-kind: variable-list )

   Parse a "map" clause.  "always" is not a reserved word, so it may
   itself be a mapped variable; the first token is saved to allow that
   reinterpretation.  */

static tree
c_parser_omp_clause_map (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  enum gomp_map_kind kind = GOMP_MAP_TOFROM;
  /* 0: no "always" modifier; 1: "always kind:" form (no comma);
     2: "always , ..." form.  */
  int always = 0;
  enum c_id_kind always_id_kind = C_ID_NONE;
  location_t always_loc = UNKNOWN_LOCATION;
  tree always_id = NULL_TREE;
  tree nl, c;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      c_token *tok = c_parser_peek_token (parser);
      const char *p = IDENTIFIER_POINTER (tok->value);
      /* Remember the token in case "always" turns out to name a
	 variable rather than act as a modifier.  */
      always_id_kind = tok->id_kind;
      always_loc = tok->location;
      always_id = tok->value;
      if (strcmp ("always", p) == 0)
	{
	  c_token *sectok = c_parser_peek_2nd_token (parser);
	  if (sectok->type == CPP_COMMA)
	    {
	      c_parser_consume_token (parser);
	      c_parser_consume_token (parser);
	      always = 2;
	    }
	  else if (sectok->type == CPP_NAME)
	    {
	      /* Only treat "always" as a modifier when a known map
		 kind follows.  */
	      p = IDENTIFIER_POINTER (sectok->value);
	      if (strcmp ("alloc", p) == 0
		  || strcmp ("to", p) == 0
		  || strcmp ("from", p) == 0
		  || strcmp ("tofrom", p) == 0
		  || strcmp ("release", p) == 0
		  || strcmp ("delete", p) == 0)
		{
		  c_parser_consume_token (parser);
		  always = 1;
		}
	    }
	}
    }

  /* Optional "map-kind :" prefix.  */
  if (c_parser_next_token_is (parser, CPP_NAME)
      && c_parser_peek_2nd_token (parser)->type == CPP_COLON)
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      if (strcmp ("alloc", p) == 0)
	kind = GOMP_MAP_ALLOC;
      else if (strcmp ("to", p) == 0)
	kind = always ? GOMP_MAP_ALWAYS_TO : GOMP_MAP_TO;
      else if (strcmp ("from", p) == 0)
	kind = always ? GOMP_MAP_ALWAYS_FROM : GOMP_MAP_FROM;
      else if (strcmp ("tofrom", p) == 0)
	kind = always ? GOMP_MAP_ALWAYS_TOFROM : GOMP_MAP_TOFROM;
      else if (strcmp ("release", p) == 0)
	kind = GOMP_MAP_RELEASE;
      else if (strcmp ("delete", p) == 0)
	kind = GOMP_MAP_DELETE;
      else
	{
	  c_parser_error (parser, "invalid map kind");
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return list;
	}
      c_parser_consume_token (parser);
      c_parser_consume_token (parser);
    }
  else if (always)
    {
      /* "always" was consumed but no "kind :" followed; reinterpret
	 the saved token as the first mapped variable.  */
      if (always_id_kind != C_ID_ID)
	{
	  c_parser_error (parser, "expected identifier");
	  parens.skip_until_found_close (parser);
	  return list;
	}

      tree t = lookup_name (always_id);
      if (t == NULL_TREE)
	{
	  undeclared_variable (always_loc, always_id);
	  t = error_mark_node;
	}
      if (t != error_mark_node)
	{
	  tree u = build_omp_clause (clause_loc, OMP_CLAUSE_MAP);
	  OMP_CLAUSE_DECL (u) = t;
	  OMP_CLAUSE_CHAIN (u) = list;
	  OMP_CLAUSE_SET_MAP_KIND (u, kind);
	  list = u;
	}
      if (always == 1)
	{
	  parens.skip_until_found_close (parser);
	  return list;
	}
    }

  nl = c_parser_omp_variable_list (parser, clause_loc, OMP_CLAUSE_MAP, list);

  /* Stamp the kind onto every clause just parsed.  */
  for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_SET_MAP_KIND (c, kind);

  parens.skip_until_found_close (parser);
  return nl;
}
/* OpenMP 4.0:
   device ( expression )

   Parse a "device" clause; the argument must be an integer
   expression.  */

static tree
c_parser_omp_clause_device (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);

      parens.skip_until_found_close (parser);

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}

      check_no_duplicate_clause (list, OMP_CLAUSE_DEVICE, "device");

      c = build_omp_clause (clause_loc, OMP_CLAUSE_DEVICE);
      OMP_CLAUSE_DEVICE_ID (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}
/* OpenMP 4.0:
   dist_schedule ( static )
   dist_schedule ( static , expression )

   Parse a "dist_schedule" clause.  "static" is the only valid kind;
   the optional chunk expression is parsed with commas disallowed.
   Returns the new clause chained onto LIST, or LIST unchanged on a
   parse error.  */

static tree
c_parser_omp_clause_dist_schedule (c_parser *parser, tree list)
{
  tree c, t = NULL_TREE;
  location_t loc = c_parser_peek_token (parser)->location;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  if (!c_parser_next_token_is_keyword (parser, RID_STATIC))
    {
      c_parser_error (parser, "invalid dist_schedule kind");
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				 "expected %<)%>");
      return list;
    }

  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_COMMA))
    {
      c_parser_consume_token (parser);

      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      t = expr.value;
      t = c_fully_fold (t, false, NULL);
      parens.skip_until_found_close (parser);
    }
  else
    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
			       "expected %<,%> or %<)%>");

  /* At most one dist_schedule clause may appear on a directive.  The
     previous code mistakenly checked for OMP_CLAUSE_SCHEDULE, so a
     repeated dist_schedule clause was never diagnosed.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_DIST_SCHEDULE,
			     "dist_schedule");

  if (t == error_mark_node)
    return list;

  c = build_omp_clause (loc, OMP_CLAUSE_DIST_SCHEDULE);
  OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 4.0:
   proc_bind ( proc-bind-kind )

   proc-bind-kind:
     master | close | spread

   Parse a "proc_bind" clause and chain it onto LIST.  */

static tree
c_parser_omp_clause_proc_bind (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  enum omp_clause_proc_bind_kind kind;
  tree c;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp ("master", p) == 0)
	kind = OMP_CLAUSE_PROC_BIND_MASTER;
      else if (strcmp ("close", p) == 0)
	kind = OMP_CLAUSE_PROC_BIND_CLOSE;
      else if (strcmp ("spread", p) == 0)
	kind = OMP_CLAUSE_PROC_BIND_SPREAD;
      else
	goto invalid_kind;
    }
  else
    goto invalid_kind;

  c_parser_consume_token (parser);
  parens.skip_until_found_close (parser);
  c = build_omp_clause (clause_loc, OMP_CLAUSE_PROC_BIND);
  OMP_CLAUSE_PROC_BIND_KIND (c) = kind;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;

 invalid_kind:
  c_parser_error (parser, "invalid proc_bind kind");
  parens.skip_until_found_close (parser);
  return list;
}
/* OpenMP 4.0:
   to ( variable-list )

   Parse a "to" clause: a plain parenthesized variable list.  */

static tree
c_parser_omp_clause_to (c_parser *parser, tree list)
{
  tree clauses = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_TO, list);
  return clauses;
}
/* OpenMP 4.0:
   from ( variable-list )

   Parse a "from" clause: a plain parenthesized variable list.  */

static tree
c_parser_omp_clause_from (c_parser *parser, tree list)
{
  tree clauses = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_FROM, list);
  return clauses;
}
/* OpenMP 4.0:
   uniform ( variable-list )

   Parse a "uniform" clause: a parenthesized variable list.  */

static tree
c_parser_omp_clause_uniform (c_parser *parser, tree list)
{
  /* The clause's location.  */
  location_t loc = c_parser_peek_token (parser)->location;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  tree clauses = c_parser_omp_variable_list (parser, loc, OMP_CLAUSE_UNIFORM,
					     list);
  parens.skip_until_found_close (parser);
  return clauses;
}
/* Parse all OpenACC clauses.  The set clauses allowed by the directive
   is a bitmask in MASK.  Return the list of clauses found.

   WHERE names the directive for diagnostics (e.g. "#pragma acc data").
   If FINISH_P, run the resulting list through c_finish_omp_clauses
   before returning; callers that still need to post-process the raw
   list pass false.  */

static tree
c_parser_oacc_all_clauses (c_parser *parser, omp_clause_mask mask,
			   const char *where, bool finish_p = true)
{
  tree clauses = NULL;
  bool first = true;

  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      location_t here;
      pragma_omp_clause c_kind;
      const char *c_name;
      /* Remember the current list head so a clause not permitted by
	 MASK can be stripped off again below.  */
      tree prev = clauses;

      /* Clauses may optionally be separated by a comma.  */
      if (!first && c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);

      here = c_parser_peek_token (parser)->location;
      c_kind = c_parser_omp_clause_name (parser);

      /* Dispatch to the per-clause parser; each case also records the
	 clause name for the MASK diagnostic below.  */
      switch (c_kind)
	{
	case PRAGMA_OACC_CLAUSE_ASYNC:
	  clauses = c_parser_oacc_clause_async (parser, clauses);
	  c_name = "async";
	  break;
	case PRAGMA_OACC_CLAUSE_AUTO:
	  clauses = c_parser_oacc_simple_clause (parser, OMP_CLAUSE_AUTO,
						 clauses);
	  c_name = "auto";
	  break;
	case PRAGMA_OACC_CLAUSE_COLLAPSE:
	  clauses = c_parser_omp_clause_collapse (parser, clauses);
	  c_name = "collapse";
	  break;
	case PRAGMA_OACC_CLAUSE_COPY:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "copy";
	  break;
	case PRAGMA_OACC_CLAUSE_COPYIN:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "copyin";
	  break;
	case PRAGMA_OACC_CLAUSE_COPYOUT:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "copyout";
	  break;
	case PRAGMA_OACC_CLAUSE_CREATE:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "create";
	  break;
	case PRAGMA_OACC_CLAUSE_DELETE:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "delete";
	  break;
	case PRAGMA_OMP_CLAUSE_DEFAULT:
	  /* Shared with OpenMP; 'true' selects OpenACC semantics.  */
	  clauses = c_parser_omp_clause_default (parser, clauses, true);
	  c_name = "default";
	  break;
	case PRAGMA_OACC_CLAUSE_DEVICE:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "device";
	  break;
	case PRAGMA_OACC_CLAUSE_DEVICEPTR:
	  clauses = c_parser_oacc_data_clause_deviceptr (parser, clauses);
	  c_name = "deviceptr";
	  break;
	case PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "device_resident";
	  break;
	case PRAGMA_OACC_CLAUSE_FIRSTPRIVATE:
	  clauses = c_parser_omp_clause_firstprivate (parser, clauses);
	  c_name = "firstprivate";
	  break;
	case PRAGMA_OACC_CLAUSE_GANG:
	  /* c_name is set first here because the shape-clause parser
	     uses it in its own diagnostics.  */
	  c_name = "gang";
	  clauses = c_parser_oacc_shape_clause (parser, OMP_CLAUSE_GANG,
						c_name, clauses);
	  break;
	case PRAGMA_OACC_CLAUSE_HOST:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "host";
	  break;
	case PRAGMA_OACC_CLAUSE_IF:
	  clauses = c_parser_omp_clause_if (parser, clauses, false);
	  c_name = "if";
	  break;
	case PRAGMA_OACC_CLAUSE_INDEPENDENT:
	  clauses = c_parser_oacc_simple_clause (parser, OMP_CLAUSE_INDEPENDENT,
						 clauses);
	  c_name = "independent";
	  break;
	case PRAGMA_OACC_CLAUSE_LINK:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "link";
	  break;
	case PRAGMA_OACC_CLAUSE_NUM_GANGS:
	  clauses = c_parser_oacc_single_int_clause (parser,
						     OMP_CLAUSE_NUM_GANGS,
						     clauses);
	  c_name = "num_gangs";
	  break;
	case PRAGMA_OACC_CLAUSE_NUM_WORKERS:
	  clauses = c_parser_oacc_single_int_clause (parser,
						     OMP_CLAUSE_NUM_WORKERS,
						     clauses);
	  c_name = "num_workers";
	  break;
	case PRAGMA_OACC_CLAUSE_PRESENT:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "present";
	  break;
	case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "present_or_copy";
	  break;
	case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "present_or_copyin";
	  break;
	case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "present_or_copyout";
	  break;
	case PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "present_or_create";
	  break;
	case PRAGMA_OACC_CLAUSE_PRIVATE:
	  clauses = c_parser_omp_clause_private (parser, clauses);
	  c_name = "private";
	  break;
	case PRAGMA_OACC_CLAUSE_REDUCTION:
	  clauses = c_parser_omp_clause_reduction (parser, clauses);
	  c_name = "reduction";
	  break;
	case PRAGMA_OACC_CLAUSE_SELF:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "self";
	  break;
	case PRAGMA_OACC_CLAUSE_SEQ:
	  clauses = c_parser_oacc_simple_clause (parser, OMP_CLAUSE_SEQ,
						 clauses);
	  c_name = "seq";
	  break;
	case PRAGMA_OACC_CLAUSE_TILE:
	  clauses = c_parser_oacc_clause_tile (parser, clauses);
	  c_name = "tile";
	  break;
	case PRAGMA_OACC_CLAUSE_USE_DEVICE:
	  clauses = c_parser_omp_clause_use_device_ptr (parser, clauses);
	  c_name = "use_device";
	  break;
	case PRAGMA_OACC_CLAUSE_VECTOR:
	  c_name = "vector";
	  clauses = c_parser_oacc_shape_clause (parser, OMP_CLAUSE_VECTOR,
						c_name,	clauses);
	  break;
	case PRAGMA_OACC_CLAUSE_VECTOR_LENGTH:
	  clauses = c_parser_oacc_single_int_clause (parser,
						     OMP_CLAUSE_VECTOR_LENGTH,
						     clauses);
	  c_name = "vector_length";
	  break;
	case PRAGMA_OACC_CLAUSE_WAIT:
	  clauses = c_parser_oacc_clause_wait (parser, clauses);
	  c_name = "wait";
	  break;
	case PRAGMA_OACC_CLAUSE_WORKER:
	  c_name = "worker";
	  clauses = c_parser_oacc_shape_clause (parser, OMP_CLAUSE_WORKER,
						c_name, clauses);
	  break;
	default:
	  c_parser_error (parser, "expected %<#pragma acc%> clause");
	  goto saw_error;
	}

      first = false;

      /* Reject clauses the directive does not permit.  */
      if (((mask >> c_kind) & 1) == 0)
	{
	  /* Remove the invalid clause(s) from the list to avoid
	     confusing the rest of the compiler.  */
	  clauses = prev;
	  error_at (here, "%qs is not valid for %qs", c_name, where);
	}
    }

 saw_error:
  c_parser_skip_to_pragma_eol (parser);

  if (finish_p)
    return c_finish_omp_clauses (clauses, C_ORT_ACC);

  return clauses;
}
/* Parse all OpenMP clauses.  The set clauses allowed by the directive
   is a bitmask in MASK.  Return the list of clauses found.

   WHERE names the directive for diagnostics.  If FINISH_P, run the
   resulting list through c_finish_omp_clauses before returning.  */

static tree
c_parser_omp_all_clauses (c_parser *parser, omp_clause_mask mask,
			  const char *where, bool finish_p = true)
{
  tree clauses = NULL;
  bool first = true;

  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      location_t here;
      pragma_omp_clause c_kind;
      const char *c_name;
      /* Remember the current list head so a clause not permitted by
	 MASK can be stripped off again below.  */
      tree prev = clauses;

      /* Clauses may optionally be separated by a comma.  */
      if (!first && c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);

      here = c_parser_peek_token (parser)->location;
      c_kind = c_parser_omp_clause_name (parser);

      /* Dispatch to the per-clause parser; each case also records the
	 clause name for the MASK diagnostic below.  */
      switch (c_kind)
	{
	case PRAGMA_OMP_CLAUSE_COLLAPSE:
	  clauses = c_parser_omp_clause_collapse (parser, clauses);
	  c_name = "collapse";
	  break;
	case PRAGMA_OMP_CLAUSE_COPYIN:
	  clauses = c_parser_omp_clause_copyin (parser, clauses);
	  c_name = "copyin";
	  break;
	case PRAGMA_OMP_CLAUSE_COPYPRIVATE:
	  clauses = c_parser_omp_clause_copyprivate (parser, clauses);
	  c_name = "copyprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_DEFAULT:
	  /* Shared with OpenACC; 'false' selects OpenMP semantics.  */
	  clauses = c_parser_omp_clause_default (parser, clauses, false);
	  c_name = "default";
	  break;
	case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE:
	  clauses = c_parser_omp_clause_firstprivate (parser, clauses);
	  c_name = "firstprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_FINAL:
	  clauses = c_parser_omp_clause_final (parser, clauses);
	  c_name = "final";
	  break;
	case PRAGMA_OMP_CLAUSE_GRAINSIZE:
	  clauses = c_parser_omp_clause_grainsize (parser, clauses);
	  c_name = "grainsize";
	  break;
	case PRAGMA_OMP_CLAUSE_HINT:
	  clauses = c_parser_omp_clause_hint (parser, clauses);
	  c_name = "hint";
	  break;
	case PRAGMA_OMP_CLAUSE_DEFAULTMAP:
	  clauses = c_parser_omp_clause_defaultmap (parser, clauses);
	  c_name = "defaultmap";
	  break;
	case PRAGMA_OMP_CLAUSE_IF:
	  clauses = c_parser_omp_clause_if (parser, clauses, true);
	  c_name = "if";
	  break;
	case PRAGMA_OMP_CLAUSE_LASTPRIVATE:
	  clauses = c_parser_omp_clause_lastprivate (parser, clauses);
	  c_name = "lastprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_MERGEABLE:
	  clauses = c_parser_omp_clause_mergeable (parser, clauses);
	  c_name = "mergeable";
	  break;
	case PRAGMA_OMP_CLAUSE_NOWAIT:
	  clauses = c_parser_omp_clause_nowait (parser, clauses);
	  c_name = "nowait";
	  break;
	case PRAGMA_OMP_CLAUSE_NUM_TASKS:
	  clauses = c_parser_omp_clause_num_tasks (parser, clauses);
	  c_name = "num_tasks";
	  break;
	case PRAGMA_OMP_CLAUSE_NUM_THREADS:
	  clauses = c_parser_omp_clause_num_threads (parser, clauses);
	  c_name = "num_threads";
	  break;
	case PRAGMA_OMP_CLAUSE_ORDERED:
	  clauses = c_parser_omp_clause_ordered (parser, clauses);
	  c_name = "ordered";
	  break;
	case PRAGMA_OMP_CLAUSE_PRIORITY:
	  clauses = c_parser_omp_clause_priority (parser, clauses);
	  c_name = "priority";
	  break;
	case PRAGMA_OMP_CLAUSE_PRIVATE:
	  clauses = c_parser_omp_clause_private (parser, clauses);
	  c_name = "private";
	  break;
	case PRAGMA_OMP_CLAUSE_REDUCTION:
	  clauses = c_parser_omp_clause_reduction (parser, clauses);
	  c_name = "reduction";
	  break;
	case PRAGMA_OMP_CLAUSE_SCHEDULE:
	  clauses = c_parser_omp_clause_schedule (parser, clauses);
	  c_name = "schedule";
	  break;
	case PRAGMA_OMP_CLAUSE_SHARED:
	  clauses = c_parser_omp_clause_shared (parser, clauses);
	  c_name = "shared";
	  break;
	case PRAGMA_OMP_CLAUSE_UNTIED:
	  clauses = c_parser_omp_clause_untied (parser, clauses);
	  c_name = "untied";
	  break;
	case PRAGMA_OMP_CLAUSE_INBRANCH:
	  clauses = c_parser_omp_clause_branch (parser, OMP_CLAUSE_INBRANCH,
						clauses);
	  c_name = "inbranch";
	  break;
	case PRAGMA_OMP_CLAUSE_NOTINBRANCH:
	  clauses = c_parser_omp_clause_branch (parser, OMP_CLAUSE_NOTINBRANCH,
						clauses);
	  c_name = "notinbranch";
	  break;
	case PRAGMA_OMP_CLAUSE_PARALLEL:
	  clauses
	    = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_PARALLEL,
					      clauses);
	  c_name = "parallel";
	  /* The cancel-kind clauses must come first; the other
	     cancel-kind cases below jump back here to share the
	     diagnostic.  */
	  if (!first)
	    {
	     clause_not_first:
	      error_at (here, "%qs must be the first clause of %qs",
			c_name, where);
	      clauses = prev;
	    }
	  break;
	case PRAGMA_OMP_CLAUSE_FOR:
	  clauses
	    = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_FOR,
					      clauses);
	  c_name = "for";
	  if (!first)
	    goto clause_not_first;
	  break;
	case PRAGMA_OMP_CLAUSE_SECTIONS:
	  clauses
	    = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_SECTIONS,
					      clauses);
	  c_name = "sections";
	  if (!first)
	    goto clause_not_first;
	  break;
	case PRAGMA_OMP_CLAUSE_TASKGROUP:
	  clauses
	    = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_TASKGROUP,
					      clauses);
	  c_name = "taskgroup";
	  if (!first)
	    goto clause_not_first;
	  break;
	case PRAGMA_OMP_CLAUSE_LINK:
	  clauses
	    = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_LINK, clauses);
	  c_name = "link";
	  break;
	case PRAGMA_OMP_CLAUSE_TO:
	  /* On "declare target" (identified by LINK being permitted)
	     "to" means OMP_CLAUSE_TO_DECLARE rather than a target
	     update "to" clause.  */
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINK)) != 0)
	    clauses
	      = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_TO_DECLARE,
					      clauses);
	  else
	    clauses = c_parser_omp_clause_to (parser, clauses);
	  c_name = "to";
	  break;
	case PRAGMA_OMP_CLAUSE_FROM:
	  clauses = c_parser_omp_clause_from (parser, clauses);
	  c_name = "from";
	  break;
	case PRAGMA_OMP_CLAUSE_UNIFORM:
	  clauses = c_parser_omp_clause_uniform (parser, clauses);
	  c_name = "uniform";
	  break;
	case PRAGMA_OMP_CLAUSE_NUM_TEAMS:
	  clauses = c_parser_omp_clause_num_teams (parser, clauses);
	  c_name = "num_teams";
	  break;
	case PRAGMA_OMP_CLAUSE_THREAD_LIMIT:
	  clauses = c_parser_omp_clause_thread_limit (parser, clauses);
	  c_name = "thread_limit";
	  break;
	case PRAGMA_OMP_CLAUSE_ALIGNED:
	  clauses = c_parser_omp_clause_aligned (parser, clauses);
	  c_name = "aligned";
	  break;
	case PRAGMA_OMP_CLAUSE_LINEAR:
	  clauses = c_parser_omp_clause_linear (parser, clauses);
	  c_name = "linear";
	  break;
	case PRAGMA_OMP_CLAUSE_DEPEND:
	  clauses = c_parser_omp_clause_depend (parser, clauses);
	  c_name = "depend";
	  break;
	case PRAGMA_OMP_CLAUSE_MAP:
	  clauses = c_parser_omp_clause_map (parser, clauses);
	  c_name = "map";
	  break;
	case PRAGMA_OMP_CLAUSE_USE_DEVICE_PTR:
	  clauses = c_parser_omp_clause_use_device_ptr (parser, clauses);
	  c_name = "use_device_ptr";
	  break;
	case PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR:
	  clauses = c_parser_omp_clause_is_device_ptr (parser, clauses);
	  c_name = "is_device_ptr";
	  break;
	case PRAGMA_OMP_CLAUSE_DEVICE:
	  clauses = c_parser_omp_clause_device (parser, clauses);
	  c_name = "device";
	  break;
	case PRAGMA_OMP_CLAUSE_DIST_SCHEDULE:
	  clauses = c_parser_omp_clause_dist_schedule (parser, clauses);
	  c_name = "dist_schedule";
	  break;
	case PRAGMA_OMP_CLAUSE_PROC_BIND:
	  clauses = c_parser_omp_clause_proc_bind (parser, clauses);
	  c_name = "proc_bind";
	  break;
	case PRAGMA_OMP_CLAUSE_SAFELEN:
	  clauses = c_parser_omp_clause_safelen (parser, clauses);
	  c_name = "safelen";
	  break;
	case PRAGMA_OMP_CLAUSE_SIMDLEN:
	  clauses = c_parser_omp_clause_simdlen (parser, clauses);
	  c_name = "simdlen";
	  break;
	case PRAGMA_OMP_CLAUSE_NOGROUP:
	  clauses = c_parser_omp_clause_nogroup (parser, clauses);
	  c_name = "nogroup";
	  break;
	case PRAGMA_OMP_CLAUSE_THREADS:
	  clauses
	    = c_parser_omp_clause_orderedkind (parser, OMP_CLAUSE_THREADS,
					       clauses);
	  c_name = "threads";
	  break;
	case PRAGMA_OMP_CLAUSE_SIMD:
	  clauses
	    = c_parser_omp_clause_orderedkind (parser, OMP_CLAUSE_SIMD,
					       clauses);
	  c_name = "simd";
	  break;
	default:
	  c_parser_error (parser, "expected %<#pragma omp%> clause");
	  goto saw_error;
	}

      first = false;

      /* Reject clauses the directive does not permit.  */
      if (((mask >> c_kind) & 1) == 0)
	{
	  /* Remove the invalid clause(s) from the list to avoid
	     confusing the rest of the compiler.  */
	  clauses = prev;
	  error_at (here, "%qs is not valid for %qs", c_name, where);
	}
    }

 saw_error:
  c_parser_skip_to_pragma_eol (parser);

  if (finish_p)
    {
      /* A mask permitting "uniform" identifies a declare-simd style
	 directive; its clauses get finished differently.  */
      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNIFORM)) != 0)
	return c_finish_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD);
      return c_finish_omp_clauses (clauses, C_ORT_OMP);
    }

  return clauses;
}
/* OpenACC 2.0, OpenMP 2.5:
   structured-block:
     statement

   In practice, we're also interested in adding the statement to an
   outer node.  So it is convenient if we work around the fact that
   c_parser_statement calls add_stmt.

   Parse one statement into a fresh statement list and hand that list
   back without adding it anywhere.  */

static tree
c_parser_omp_structured_block (c_parser *parser, bool *if_p)
{
  tree saved_list = push_stmt_list ();
  c_parser_statement (parser, if_p);
  return pop_stmt_list (saved_list);
}
/* OpenACC 2.0:
   # pragma acc cache (variable-list) new-line

   Build an OACC_CACHE statement from the parenthesized variable list
   and add it to the current statement list.  LOC is the location of
   the #pragma token.  */

static tree
c_parser_oacc_cache (location_t loc, c_parser *parser)
{
  tree clause_list
    = c_parser_omp_var_list_parens (parser, OMP_CLAUSE__CACHE_, NULL);
  clause_list = c_finish_omp_clauses (clause_list, C_ORT_ACC);

  c_parser_skip_to_pragma_eol (parser);

  tree cache_stmt = make_node (OACC_CACHE);
  TREE_TYPE (cache_stmt) = void_type_node;
  OACC_CACHE_CLAUSES (cache_stmt) = clause_list;
  SET_EXPR_LOCATION (cache_stmt, loc);
  add_stmt (cache_stmt);

  return cache_stmt;
}
/* OpenACC 2.0:
# pragma acc data oacc-data-clause[optseq] new-line
structured-block
LOC is the location of the #pragma token.
*/
#define OACC_DATA_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) )
static tree
c_parser_oacc_data (location_t loc, c_parser *parser, bool *if_p)
{
  /* Parse the data clauses, then the structured block they govern.  */
  tree clause_list = c_parser_oacc_all_clauses (parser, OACC_DATA_CLAUSE_MASK,
						"#pragma acc data");

  tree body = c_begin_omp_parallel ();
  add_stmt (c_parser_omp_structured_block (parser, if_p));

  return c_finish_oacc_data (loc, clause_list, body);
}
/* OpenACC 2.0:
# pragma acc declare oacc-data-clause[optseq] new-line
*/
#define OACC_DECLARE_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_LINK) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) )
static void
c_parser_oacc_declare (c_parser *parser)
{
  /* Parse "#pragma acc declare", validate each clause's decl, attach
     "omp declare target" / "omp declare target link" attributes, and
     (for block scope) emit an OACC_DECLARE statement.  */
  location_t pragma_loc = c_parser_peek_token (parser)->location;
  tree clauses, stmt, t, decl;

  bool error = false;

  c_parser_consume_pragma (parser);

  clauses = c_parser_oacc_all_clauses (parser, OACC_DECLARE_CLAUSE_MASK,
				       "#pragma acc declare");
  if (!clauses)
    {
      error_at (pragma_loc,
		"no valid clauses specified in %<#pragma acc declare%>");
      return;
    }

  /* Validate each clause's decl; keep scanning after an error so all
     problems are reported, but remember that one occurred.  */
  for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
    {
      location_t loc = OMP_CLAUSE_LOCATION (t);
      decl = OMP_CLAUSE_DECL (t);
      if (!DECL_P (decl))
	{
	  error_at (loc, "array section in %<#pragma acc declare%>");
	  error = true;
	  continue;
	}

      switch (OMP_CLAUSE_MAP_KIND (t))
	{
	case GOMP_MAP_FIRSTPRIVATE_POINTER:
	case GOMP_MAP_FORCE_ALLOC:
	case GOMP_MAP_FORCE_TO:
	case GOMP_MAP_FORCE_DEVICEPTR:
	case GOMP_MAP_DEVICE_RESIDENT:
	  break;

	case GOMP_MAP_LINK:
	  /* "link" is only meaningful for global variables.  */
	  if (!global_bindings_p ()
	      && (TREE_STATIC (decl)
	       || !DECL_EXTERNAL (decl)))
	    {
	      error_at (loc,
			"%qD must be a global variable in "
			"%<#pragma acc declare link%>",
			decl);
	      error = true;
	      continue;
	    }
	  break;

	default:
	  /* The remaining map kinds are only valid at block scope, on
	     local, non-extern, non-public variables.  */
	  if (global_bindings_p ())
	    {
	      error_at (loc, "invalid OpenACC clause at file scope");
	      error = true;
	      continue;
	    }
	  if (DECL_EXTERNAL (decl))
	    {
	      error_at (loc,
			"invalid use of %<extern%> variable %qD "
			"in %<#pragma acc declare%>", decl);
	      error = true;
	      continue;
	    }
	  else if (TREE_PUBLIC (decl))
	    {
	      error_at (loc,
			"invalid use of %<global%> variable %qD "
			"in %<#pragma acc declare%>", decl);
	      error = true;
	      continue;
	    }
	  break;
	}

      /* A decl already carrying a declare-target attribute has been
	 named by an earlier "acc declare"/"acc routine".  */
      if (lookup_attribute ("omp declare target", DECL_ATTRIBUTES (decl))
	  || lookup_attribute ("omp declare target link",
			       DECL_ATTRIBUTES (decl)))
	{
	  error_at (loc, "variable %qD used more than once with "
		    "%<#pragma acc declare%>", decl);
	  error = true;
	  continue;
	}

      if (!error)
	{
	  tree id;

	  if (OMP_CLAUSE_MAP_KIND (t) == GOMP_MAP_LINK)
	    id = get_identifier ("omp declare target link");
	  else
	    id = get_identifier ("omp declare target");

	  DECL_ATTRIBUTES (decl)
	    = tree_cons (id, NULL_TREE, DECL_ATTRIBUTES (decl));

	  /* File-scope decls are additionally registered with the
	     symbol table for offloading.  */
	  if (global_bindings_p ())
	    {
	      symtab_node *node = symtab_node::get (decl);
	      if (node != NULL)
		{
		  node->offloadable = 1;
		  if (ENABLE_OFFLOADING)
		    {
		      g->have_offload = true;
		      if (is_a <varpool_node *> (node))
			vec_safe_push (offload_vars, decl);
		    }
		}
	    }
	}
    }

  /* Only block-scope declares produce a statement.  */
  if (error || global_bindings_p ())
    return;

  stmt = make_node (OACC_DECLARE);
  TREE_TYPE (stmt) = void_type_node;
  OACC_DECLARE_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, pragma_loc);

  add_stmt (stmt);

  return;
}
/* OpenACC 2.0:
# pragma acc enter data oacc-enter-data-clause[optseq] new-line
or
# pragma acc exit data oacc-exit-data-clause[optseq] new-line
LOC is the location of the #pragma token.
*/
#define OACC_ENTER_DATA_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )
#define OACC_EXIT_DATA_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DELETE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )
static void
c_parser_oacc_enter_exit_data (c_parser *parser, bool enter)
{
  /* Parse "#pragma acc enter data" (ENTER true) or "#pragma acc exit
     data" (ENTER false) and emit the corresponding standalone
     statement.  The "enter"/"exit" keyword has already been consumed
     as part of the pragma; the "data" keyword is expected next.  */
  location_t loc = c_parser_peek_token (parser)->location;
  tree clauses, stmt;
  const char *p = "";

  c_parser_consume_pragma (parser);

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      c_parser_consume_token (parser);
    }

  if (strcmp (p, "data") != 0)
    {
      error_at (loc, "expected %<data%> after %<#pragma acc %s%>",
		enter ? "enter" : "exit");
      parser->error = true;
      c_parser_skip_to_pragma_eol (parser);
      return;
    }

  if (enter)
    clauses = c_parser_oacc_all_clauses (parser, OACC_ENTER_DATA_CLAUSE_MASK,
					 "#pragma acc enter data");
  else
    clauses = c_parser_oacc_all_clauses (parser, OACC_EXIT_DATA_CLAUSE_MASK,
					 "#pragma acc exit data");

  /* The directive is useless without at least one data clause (those
     all produce OMP_CLAUSE_MAP nodes).  */
  if (omp_find_clause (clauses, OMP_CLAUSE_MAP) == NULL_TREE)
    {
      error_at (loc, "%<#pragma acc %s data%> has no data movement clause",
		enter ? "enter" : "exit");
      return;
    }

  stmt = enter ? make_node (OACC_ENTER_DATA) : make_node (OACC_EXIT_DATA);
  TREE_TYPE (stmt) = void_type_node;
  OMP_STANDALONE_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);
}
/* OpenACC 2.0:
# pragma acc host_data oacc-data-clause[optseq] new-line
structured-block
*/
#define OACC_HOST_DATA_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_USE_DEVICE) )
static tree
c_parser_oacc_host_data (location_t loc, c_parser *parser, bool *if_p)
{
  /* Parse the host_data clauses, then the structured block they
     govern.  */
  tree clause_list
    = c_parser_oacc_all_clauses (parser, OACC_HOST_DATA_CLAUSE_MASK,
				 "#pragma acc host_data");

  tree body = c_begin_omp_parallel ();
  add_stmt (c_parser_omp_structured_block (parser, if_p));

  return c_finish_oacc_host_data (loc, clause_list, body);
}
/* OpenACC 2.0:
# pragma acc loop oacc-loop-clause[optseq] new-line
structured-block
LOC is the location of the #pragma token.
*/
#define OACC_LOOP_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COLLAPSE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_GANG) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WORKER) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_AUTO) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_INDEPENDENT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_SEQ) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_TILE) )
static tree
c_parser_oacc_loop (location_t loc, c_parser *parser, char *p_name,
		    omp_clause_mask mask, tree *cclauses, bool *if_p)
{
  /* Parse "... loop" with its clauses and loop nest.  P_NAME is the
     directive name built up so far (" loop" is appended here); MASK is
     the enclosing construct's clause mask, extended with the loop
     clauses.  If CCLAUSES is non-NULL we are a combined construct:
     the parsed clauses are split between the loop and *CCLAUSES (the
     enclosing construct's share) and both halves finished here.  */

  /* A reduction-capable mask means the combining construct is
     "parallel" rather than "kernels".  */
  bool is_parallel = ((mask >> PRAGMA_OACC_CLAUSE_REDUCTION) & 1) == 1;

  strcat (p_name, " loop");
  mask |= OACC_LOOP_CLAUSE_MASK;

  tree clauses = c_parser_oacc_all_clauses (parser, mask, p_name,
					    cclauses == NULL);
  if (cclauses)
    {
      clauses = c_oacc_split_loop_clauses (clauses, cclauses, is_parallel);
      if (*cclauses)
	*cclauses = c_finish_omp_clauses (*cclauses, C_ORT_ACC);
      if (clauses)
	clauses = c_finish_omp_clauses (clauses, C_ORT_ACC);
    }

  tree block = c_begin_compound_stmt (true);
  tree stmt = c_parser_omp_for_loop (loc, parser, OACC_LOOP, clauses, NULL,
				     if_p);
  block = c_end_compound_stmt (loc, block, true);
  add_stmt (block);

  return stmt;
}
/* OpenACC 2.0:
# pragma acc kernels oacc-kernels-clause[optseq] new-line
structured-block
or
# pragma acc parallel oacc-parallel-clause[optseq] new-line
structured-block
LOC is the location of the #pragma token.
*/
#define OACC_KERNELS_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEFAULT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_GANGS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_WORKERS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR_LENGTH) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )
#define OACC_PARALLEL_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEFAULT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_GANGS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_WORKERS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR_LENGTH) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )
static tree
c_parser_oacc_kernels_parallel (location_t loc, c_parser *parser,
				enum pragma_kind p_kind, char *p_name,
				bool *if_p)
{
  /* Parse "#pragma acc kernels" or "#pragma acc parallel", possibly
     combined with a following "loop".  P_KIND selects which; P_NAME is
     the directive name built up so far, extended here for
     diagnostics.  */
  omp_clause_mask mask;
  enum tree_code code;
  switch (p_kind)
    {
    case PRAGMA_OACC_KERNELS:
      strcat (p_name, " kernels");
      mask = OACC_KERNELS_CLAUSE_MASK;
      code = OACC_KERNELS;
      break;
    case PRAGMA_OACC_PARALLEL:
      strcat (p_name, " parallel");
      mask = OACC_PARALLEL_CLAUSE_MASK;
      code = OACC_PARALLEL;
      break;
    default:
      gcc_unreachable ();
    }

  /* Combined construct: "kernels loop" / "parallel loop".  The loop
     parser splits the clauses between the two constructs.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "loop") == 0)
	{
	  c_parser_consume_token (parser);
	  tree block = c_begin_omp_parallel ();
	  tree clauses;
	  c_parser_oacc_loop (loc, parser, p_name, mask, &clauses, if_p);
	  return c_finish_omp_construct (loc, code, block, clauses);
	}
    }

  tree clauses = c_parser_oacc_all_clauses (parser, mask, p_name);

  tree block = c_begin_omp_parallel ();
  add_stmt (c_parser_omp_structured_block (parser, if_p));

  return c_finish_omp_construct (loc, code, block, clauses);
}
/* OpenACC 2.0:
# pragma acc routine oacc-routine-clause[optseq] new-line
function-definition
# pragma acc routine ( name ) oacc-routine-clause[optseq] new-line
*/
#define OACC_ROUTINE_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_GANG) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WORKER) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_SEQ) )
/* Parse an OpenACC routine directive.  For named directives, we apply
   immediately to the named function.  For unnamed ones we then parse
   a declaration or definition, which must be for a function.  */

static void
c_parser_oacc_routine (c_parser *parser, enum pragma_context context)
{
  gcc_checking_assert (context == pragma_external);

  oacc_routine_data data;
  data.error_seen = false;
  data.fndecl_seen = false;
  data.clauses = NULL_TREE;
  data.loc = c_parser_peek_token (parser)->location;

  c_parser_consume_pragma (parser);

  /* Look for optional '( name )'.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser); /* '(' */

      tree decl = NULL_TREE;
      c_token *name_token = c_parser_peek_token (parser);
      location_t name_loc = name_token->location;
      if (name_token->type == CPP_NAME
	  && (name_token->id_kind == C_ID_ID
	      || name_token->id_kind == C_ID_TYPENAME))
	{
	  decl = lookup_name (name_token->value);
	  if (!decl)
	    error_at (name_loc,
		      "%qE has not been declared", name_token->value);
	  c_parser_consume_token (parser);
	}
      else
	c_parser_error (parser, "expected function name");

      if (!decl
	  || !c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	{
	  c_parser_skip_to_pragma_eol (parser, false);
	  return;
	}

      data.clauses
	= c_parser_oacc_all_clauses (parser, OACC_ROUTINE_CLAUSE_MASK,
				     "#pragma acc routine");

      if (TREE_CODE (decl) != FUNCTION_DECL)
	{
	  error_at (name_loc, "%qD does not refer to a function", decl);
	  return;
	}

      c_finish_oacc_routine (&data, decl, false);
    }
  else /* No optional '( name )'.  */
    {
      data.clauses
	= c_parser_oacc_all_clauses (parser, OACC_ROUTINE_CLAUSE_MASK,
				     "#pragma acc routine");

      /* Emit a helpful diagnostic if there's another pragma following this
	 one.  Also don't allow a static assertion declaration, as in the
	 following we'll just parse a *single* "declaration or function
	 definition", and the static assertion counts as one.  */
      if (c_parser_next_token_is (parser, CPP_PRAGMA)
	  || c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT))
	{
	  error_at (data.loc,
		    "%<#pragma acc routine%> not immediately followed by"
		    " function declaration or definition");
	  /* ..., and then just keep going.  */
	  return;
	}

      /* We only have to consider the pragma_external case here.
	 Skip any __extension__ keywords with their diagnostics
	 disabled, then parse the one declaration/definition; the
	 routine data is applied from within it.  */
      if (c_parser_next_token_is (parser, CPP_KEYWORD)
	  && c_parser_peek_token (parser)->keyword == RID_EXTENSION)
	{
	  int ext = disable_extension_diagnostics ();
	  do
	    c_parser_consume_token (parser);
	  while (c_parser_next_token_is (parser, CPP_KEYWORD)
		 && c_parser_peek_token (parser)->keyword == RID_EXTENSION);
	  c_parser_declaration_or_fndef (parser, true, true, true, false, true,
					 NULL, vNULL, &data);
	  restore_extension_diagnostics (ext);
	}
      else
	c_parser_declaration_or_fndef (parser, true, true, true, false, true,
				       NULL, vNULL, &data);
    }
}
/* Finalize an OpenACC routine pragma, applying it to FNDECL.
   IS_DEFN is true if we're applying it to the definition.

   Reports an error (and sets DATA->error_seen) if the pragma has
   already been consumed, FNDECL is not a function, the function
   already has a routine attribute, or the function has already been
   used/defined; otherwise attaches the routine's dimension clauses and
   an "omp declare target" attribute to FNDECL.  */

static void
c_finish_oacc_routine (struct oacc_routine_data *data, tree fndecl,
		       bool is_defn)
{
  /* Keep going if we're in error reporting mode.  */
  if (data->error_seen
      || fndecl == error_mark_node)
    return;

  /* Each "#pragma acc routine" applies to exactly one following
     declaration or definition.  */
  if (data->fndecl_seen)
    {
      error_at (data->loc,
		"%<#pragma acc routine%> not immediately followed by"
		" a single function declaration or definition");
      data->error_seen = true;
      return;
    }
  if (fndecl == NULL_TREE || TREE_CODE (fndecl) != FUNCTION_DECL)
    {
      error_at (data->loc,
		"%<#pragma acc routine%> not immediately followed by"
		" function declaration or definition");
      data->error_seen = true;
      return;
    }

  if (oacc_get_fn_attrib (fndecl))
    {
      error_at (data->loc,
		"%<#pragma acc routine%> already applied to %qD", fndecl);
      data->error_seen = true;
      return;
    }

  /* The routine marking must precede any use (and, for a pure
     declaration, any definition) of the function.  */
  if (TREE_USED (fndecl) || (!is_defn && DECL_SAVED_TREE (fndecl)))
    {
      error_at (data->loc,
		TREE_USED (fndecl)
		? G_("%<#pragma acc routine%> must be applied before use")
		: G_("%<#pragma acc routine%> must be applied before "
		     "definition"));
      data->error_seen = true;
      return;
    }

  /* Process the routine's dimension clauses.  */
  tree dims = oacc_build_routine_dims (data->clauses);
  oacc_replace_fn_attrib (fndecl, dims);

  /* Add an "omp declare target" attribute.  */
  DECL_ATTRIBUTES (fndecl)
    = tree_cons (get_identifier ("omp declare target"),
		 NULL_TREE, DECL_ATTRIBUTES (fndecl));

  /* Remember that we've used this "#pragma acc routine".  */
  data->fndecl_seen = true;
}
/* OpenACC 2.0:
# pragma acc update oacc-update-clause[optseq] new-line
*/
#define OACC_UPDATE_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_HOST) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_SELF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )
static void
c_parser_oacc_update (c_parser *parser)
{
  /* Parse "#pragma acc update" and emit an OACC_UPDATE statement; the
     directive requires at least one data movement clause.  */
  location_t pragma_loc = c_parser_peek_token (parser)->location;

  c_parser_consume_pragma (parser);

  tree clause_list
    = c_parser_oacc_all_clauses (parser, OACC_UPDATE_CLAUSE_MASK,
				 "#pragma acc update");
  if (omp_find_clause (clause_list, OMP_CLAUSE_MAP) == NULL_TREE)
    {
      error_at (pragma_loc,
		"%<#pragma acc update%> must contain at least one "
		"%<device%> or %<host%> or %<self%> clause");
      return;
    }

  if (parser->error)
    return;

  tree update_stmt = make_node (OACC_UPDATE);
  TREE_TYPE (update_stmt) = void_type_node;
  OACC_UPDATE_CLAUSES (update_stmt) = clause_list;
  SET_EXPR_LOCATION (update_stmt, pragma_loc);
  add_stmt (update_stmt);
}
/* OpenACC 2.0:
# pragma acc wait [(intseq)] oacc-wait-clause[optseq] new-line
LOC is the location of the #pragma token.
*/
#define OACC_WAIT_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) )
/* Parse "#pragma acc wait [(intseq)] clauses", finish and emit the wait
   statement, and return it.  LOC is the location of the pragma; P_NAME
   is the construct-name buffer that " wait" is appended to for
   diagnostics.  */
static tree
c_parser_oacc_wait (location_t loc, c_parser *parser, char *p_name)
{
  tree wait_list = NULL_TREE;

  /* An optional parenthesized sequence names the asynchronous
     activities to wait for.  */
  if (c_parser_peek_token (parser)->type == CPP_OPEN_PAREN)
    wait_list = c_parser_oacc_wait_list (parser, loc, wait_list);

  strcpy (p_name, " wait");
  tree clauses = c_parser_oacc_all_clauses (parser, OACC_WAIT_CLAUSE_MASK,
					    p_name);
  tree stmt = c_finish_oacc_wait (loc, wait_list, clauses);
  add_stmt (stmt);
  return stmt;
}
/* OpenMP 2.5:
# pragma omp atomic new-line
expression-stmt
expression-stmt:
x binop= expr | x++ | ++x | x-- | --x
binop:
+, *, -, /, &, ^, |, <<, >>
where x is an lvalue expression with scalar type.
OpenMP 3.1:
# pragma omp atomic new-line
update-stmt
# pragma omp atomic read new-line
read-stmt
# pragma omp atomic write new-line
write-stmt
# pragma omp atomic update new-line
update-stmt
# pragma omp atomic capture new-line
capture-stmt
# pragma omp atomic capture new-line
capture-block
read-stmt:
v = x
write-stmt:
x = expr
update-stmt:
expression-stmt | x = x binop expr
capture-stmt:
v = expression-stmt
capture-block:
{ v = x; update-stmt; } | { update-stmt; v = x; }
OpenMP 4.0:
update-stmt:
expression-stmt | x = x binop expr | x = expr binop x
capture-stmt:
v = update-stmt
capture-block:
{ v = x; update-stmt; } | { update-stmt; v = x; } | { v = x; x = expr; }
where x and v are lvalue expressions with scalar type.
LOC is the location of the #pragma token. */
/* Parse the "#pragma omp atomic" directive (read/write/update/capture
   forms, see the grammar comment above) and emit the corresponding
   OMP_ATOMIC* statement.  LOC is the location of the pragma token;
   it is later overwritten with the location of the assignment operator
   so diagnostics point at the actual operation.  */
static void
c_parser_omp_atomic (location_t loc, c_parser *parser)
{
  tree lhs = NULL_TREE, rhs = NULL_TREE, v = NULL_TREE;
  tree lhs1 = NULL_TREE, rhs1 = NULL_TREE;
  tree stmt, orig_lhs, unfolded_lhs = NULL_TREE, unfolded_lhs1 = NULL_TREE;
  /* CODE selects the atomic kind; OPCODE is the arithmetic operation
     (NOP_EXPR for plain read/write/assignment).  */
  enum tree_code code = OMP_ATOMIC, opcode = NOP_EXPR;
  struct c_expr expr;
  location_t eloc;
  bool structured_block = false;
  /* True when "x = expr binop x" needs operand order preserved.  */
  bool swapped = false;
  bool seq_cst = false;
  bool non_lvalue_p;
  /* "seq_cst" may appear before the clause name, optionally followed
     by a comma.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (!strcmp (p, "seq_cst"))
	{
	  seq_cst = true;
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_COMMA)
	      && c_parser_peek_2nd_token (parser)->type == CPP_NAME)
	    c_parser_consume_token (parser);
	}
    }
  /* Parse the optional read/write/update/capture clause.  Note that
     "write" is encoded as NOP_EXPR here and canonicalized below.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (!strcmp (p, "read"))
	code = OMP_ATOMIC_READ;
      else if (!strcmp (p, "write"))
	code = NOP_EXPR;
      else if (!strcmp (p, "update"))
	code = OMP_ATOMIC;
      else if (!strcmp (p, "capture"))
	code = OMP_ATOMIC_CAPTURE_NEW;
      else
	p = NULL;
      if (p)
	c_parser_consume_token (parser);
    }
  /* "seq_cst" may also follow the clause name.  */
  if (!seq_cst)
    {
      if (c_parser_next_token_is (parser, CPP_COMMA)
	  && c_parser_peek_2nd_token (parser)->type == CPP_NAME)
	c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  const char *p
	    = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
	  if (!strcmp (p, "seq_cst"))
	    {
	      seq_cst = true;
	      c_parser_consume_token (parser);
	    }
	}
    }
  c_parser_skip_to_pragma_eol (parser);
  switch (code)
    {
    case OMP_ATOMIC_READ:
    case NOP_EXPR: /* atomic write */
      v = c_parser_cast_expression (parser, NULL).value;
      non_lvalue_p = !lvalue_p (v);
      v = c_fully_fold (v, false, NULL, true);
      if (v == error_mark_node)
	goto saw_error;
      if (non_lvalue_p)
	v = non_lvalue (v);
      loc = c_parser_peek_token (parser)->location;
      if (!c_parser_require (parser, CPP_EQ, "expected %<=%>"))
	goto saw_error;
      if (code == NOP_EXPR)
	{
	  lhs = c_parser_expression (parser).value;
	  lhs = c_fully_fold (lhs, false, NULL);
	  if (lhs == error_mark_node)
	    goto saw_error;
	}
      else
	{
	  lhs = c_parser_cast_expression (parser, NULL).value;
	  non_lvalue_p = !lvalue_p (lhs);
	  lhs = c_fully_fold (lhs, false, NULL, true);
	  if (lhs == error_mark_node)
	    goto saw_error;
	  if (non_lvalue_p)
	    lhs = non_lvalue (lhs);
	}
      if (code == NOP_EXPR)
	{
	  /* atomic write is represented by OMP_ATOMIC with NOP_EXPR
	     opcode.  */
	  code = OMP_ATOMIC;
	  rhs = lhs;
	  lhs = v;
	  v = NULL_TREE;
	}
      goto done;
    case OMP_ATOMIC_CAPTURE_NEW:
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  /* "{ stmt; stmt; }" capture-block form.  */
	  c_parser_consume_token (parser);
	  structured_block = true;
	}
      else
	{
	  /* "v = update-stmt" capture-statement form.  */
	  v = c_parser_cast_expression (parser, NULL).value;
	  non_lvalue_p = !lvalue_p (v);
	  v = c_fully_fold (v, false, NULL, true);
	  if (v == error_mark_node)
	    goto saw_error;
	  if (non_lvalue_p)
	    v = non_lvalue (v);
	  if (!c_parser_require (parser, CPP_EQ, "expected %<=%>"))
	    goto saw_error;
	}
      break;
    default:
      break;
    }
  /* For structured_block case we don't know yet whether
     old or new x should be captured.  */
restart:
  eloc = c_parser_peek_token (parser)->location;
  expr = c_parser_cast_expression (parser, NULL);
  lhs = expr.value;
  expr = default_function_array_conversion (eloc, expr);
  unfolded_lhs = expr.value;
  lhs = c_fully_fold (lhs, false, NULL, true);
  orig_lhs = lhs;
  /* Classify the parsed statement by its top-level tree code and
     decompose it into lhs/opcode/rhs.  */
  switch (TREE_CODE (lhs))
    {
    case ERROR_MARK:
    saw_error:
      c_parser_skip_to_end_of_block_or_statement (parser);
      if (structured_block)
	{
	  /* Consume the rest of the capture-block so the caller can
	     resume at a sane point.  */
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    c_parser_consume_token (parser);
	  else if (code == OMP_ATOMIC_CAPTURE_NEW)
	    {
	      c_parser_skip_to_end_of_block_or_statement (parser);
	      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
		c_parser_consume_token (parser);
	    }
	}
      return;
    case POSTINCREMENT_EXPR:
      /* "v = x++" captures the old value of x.  */
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
	code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREINCREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      unfolded_lhs = NULL_TREE;
      opcode = PLUS_EXPR;
      rhs = integer_one_node;
      break;
    case POSTDECREMENT_EXPR:
      /* "v = x--" captures the old value of x.  */
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
	code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREDECREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      unfolded_lhs = NULL_TREE;
      opcode = MINUS_EXPR;
      rhs = integer_one_node;
      break;
    case COMPOUND_EXPR:
      if (TREE_CODE (TREE_OPERAND (lhs, 0)) == SAVE_EXPR
	  && TREE_CODE (TREE_OPERAND (lhs, 1)) == COMPOUND_EXPR
	  && TREE_CODE (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) == MODIFY_EXPR
	  && TREE_OPERAND (TREE_OPERAND (lhs, 1), 1) == TREE_OPERAND (lhs, 0)
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND
					 (TREE_OPERAND (lhs, 1), 0), 0)))
	     == BOOLEAN_TYPE)
	/* Undo effects of boolean_increment for post {in,de}crement.  */
	lhs = TREE_OPERAND (TREE_OPERAND (lhs, 1), 0);
      /* FALLTHRU */
    case MODIFY_EXPR:
      if (TREE_CODE (lhs) == MODIFY_EXPR
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) == BOOLEAN_TYPE)
	{
	  /* Undo effects of boolean_increment.  */
	  if (integer_onep (TREE_OPERAND (lhs, 1)))
	    {
	      /* This is pre or post increment.  */
	      rhs = TREE_OPERAND (lhs, 1);
	      lhs = TREE_OPERAND (lhs, 0);
	      unfolded_lhs = NULL_TREE;
	      opcode = NOP_EXPR;
	      if (code == OMP_ATOMIC_CAPTURE_NEW
		  && !structured_block
		  && TREE_CODE (orig_lhs) == COMPOUND_EXPR)
		code = OMP_ATOMIC_CAPTURE_OLD;
	      break;
	    }
	  if (TREE_CODE (TREE_OPERAND (lhs, 1)) == TRUTH_NOT_EXPR
	      && TREE_OPERAND (lhs, 0)
		 == TREE_OPERAND (TREE_OPERAND (lhs, 1), 0))
	    {
	      /* This is pre or post decrement.  */
	      rhs = TREE_OPERAND (lhs, 1);
	      lhs = TREE_OPERAND (lhs, 0);
	      unfolded_lhs = NULL_TREE;
	      opcode = NOP_EXPR;
	      if (code == OMP_ATOMIC_CAPTURE_NEW
		  && !structured_block
		  && TREE_CODE (orig_lhs) == COMPOUND_EXPR)
		code = OMP_ATOMIC_CAPTURE_OLD;
	      break;
	    }
	}
      /* FALLTHRU */
    default:
      if (!lvalue_p (unfolded_lhs))
	lhs = non_lvalue (lhs);
      /* Expect "x binop= expr" or "x = ...".  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_MULT_EQ:
	  opcode = MULT_EXPR;
	  break;
	case CPP_DIV_EQ:
	  opcode = TRUNC_DIV_EXPR;
	  break;
	case CPP_PLUS_EQ:
	  opcode = PLUS_EXPR;
	  break;
	case CPP_MINUS_EQ:
	  opcode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT_EQ:
	  opcode = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT_EQ:
	  opcode = RSHIFT_EXPR;
	  break;
	case CPP_AND_EQ:
	  opcode = BIT_AND_EXPR;
	  break;
	case CPP_OR_EQ:
	  opcode = BIT_IOR_EXPR;
	  break;
	case CPP_XOR_EQ:
	  opcode = BIT_XOR_EXPR;
	  break;
	case CPP_EQ:
	  /* "x = x binop expr" / "x = expr binop x" forms; one operand
	     of the rhs must match the lhs.  */
	  c_parser_consume_token (parser);
	  eloc = c_parser_peek_token (parser)->location;
	  expr = c_parser_expr_no_commas (parser, NULL, unfolded_lhs);
	  rhs1 = expr.value;
	  switch (TREE_CODE (rhs1))
	    {
	    case MULT_EXPR:
	    case TRUNC_DIV_EXPR:
	    case RDIV_EXPR:
	    case PLUS_EXPR:
	    case MINUS_EXPR:
	    case LSHIFT_EXPR:
	    case RSHIFT_EXPR:
	    case BIT_AND_EXPR:
	    case BIT_IOR_EXPR:
	    case BIT_XOR_EXPR:
	      if (c_tree_equal (TREE_OPERAND (rhs1, 0), unfolded_lhs))
		{
		  /* "x = x binop expr".  */
		  opcode = TREE_CODE (rhs1);
		  rhs = c_fully_fold (TREE_OPERAND (rhs1, 1), false, NULL,
				      true);
		  rhs1 = c_fully_fold (TREE_OPERAND (rhs1, 0), false, NULL,
				       true);
		  goto stmt_done;
		}
	      if (c_tree_equal (TREE_OPERAND (rhs1, 1), unfolded_lhs))
		{
		  /* "x = expr binop x"; remember operand order for
		     non-commutative ops.  */
		  opcode = TREE_CODE (rhs1);
		  rhs = c_fully_fold (TREE_OPERAND (rhs1, 0), false, NULL,
				      true);
		  rhs1 = c_fully_fold (TREE_OPERAND (rhs1, 1), false, NULL,
				       true);
		  swapped = !commutative_tree_code (opcode);
		  goto stmt_done;
		}
	      break;
	    case ERROR_MARK:
	      goto saw_error;
	    default:
	      break;
	    }
	  if (c_parser_peek_token (parser)->type == CPP_SEMICOLON)
	    {
	      if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW)
		{
		  /* "{ v = x; update-stmt; }" — the first statement was
		     the capture, so this captures the OLD value; restart
		     to parse the actual update statement.  */
		  code = OMP_ATOMIC_CAPTURE_OLD;
		  v = lhs;
		  lhs = NULL_TREE;
		  expr = default_function_array_read_conversion (eloc, expr);
		  unfolded_lhs1 = expr.value;
		  lhs1 = c_fully_fold (unfolded_lhs1, false, NULL, true);
		  rhs1 = NULL_TREE;
		  c_parser_consume_token (parser);
		  goto restart;
		}
	      if (structured_block)
		{
		  /* Plain "x = expr" inside a capture-block.  */
		  opcode = NOP_EXPR;
		  expr = default_function_array_read_conversion (eloc, expr);
		  rhs = c_fully_fold (expr.value, false, NULL, true);
		  rhs1 = NULL_TREE;
		  goto stmt_done;
		}
	    }
	  c_parser_error (parser, "invalid form of %<#pragma omp atomic%>");
	  goto saw_error;
	default:
	  c_parser_error (parser,
			  "invalid operator for %<#pragma omp atomic%>");
	  goto saw_error;
	}
      /* Arrange to pass the location of the assignment operator to
	 c_finish_omp_atomic.  */
      loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      eloc = c_parser_peek_token (parser)->location;
      expr = c_parser_expression (parser);
      expr = default_function_array_read_conversion (eloc, expr);
      rhs = expr.value;
      rhs = c_fully_fold (rhs, false, NULL, true);
      break;
    }
stmt_done:
  /* Capture-block that still needs its trailing "v = x;" statement.  */
  if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW)
    {
      if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
	goto saw_error;
      v = c_parser_cast_expression (parser, NULL).value;
      non_lvalue_p = !lvalue_p (v);
      v = c_fully_fold (v, false, NULL, true);
      if (v == error_mark_node)
	goto saw_error;
      if (non_lvalue_p)
	v = non_lvalue (v);
      if (!c_parser_require (parser, CPP_EQ, "expected %<=%>"))
	goto saw_error;
      eloc = c_parser_peek_token (parser)->location;
      expr = c_parser_cast_expression (parser, NULL);
      lhs1 = expr.value;
      expr = default_function_array_read_conversion (eloc, expr);
      unfolded_lhs1 = expr.value;
      lhs1 = c_fully_fold (lhs1, false, NULL, true);
      if (lhs1 == error_mark_node)
	goto saw_error;
      if (!lvalue_p (unfolded_lhs1))
	lhs1 = non_lvalue (lhs1);
    }
  if (structured_block)
    {
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      c_parser_require (parser, CPP_CLOSE_BRACE, "expected %<}%>");
    }
done:
  /* Both statements of a capture-block must name the same memory
     location.  */
  if (unfolded_lhs && unfolded_lhs1
      && !c_tree_equal (unfolded_lhs, unfolded_lhs1))
    {
      error ("%<#pragma omp atomic capture%> uses two different "
	     "expressions for memory");
      stmt = error_mark_node;
    }
  else
    stmt = c_finish_omp_atomic (loc, code, opcode, lhs, rhs, v, lhs1, rhs1,
				swapped, seq_cst);
  if (stmt != error_mark_node)
    add_stmt (stmt);
  if (!structured_block)
    c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* OpenMP 2.5:
# pragma omp barrier new-line
*/
/* Parse "#pragma omp barrier" and emit the barrier at the pragma's
   location.  */
static void
c_parser_omp_barrier (c_parser *parser)
{
  location_t pragma_loc = c_parser_peek_token (parser)->location;

  c_parser_consume_pragma (parser);
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_barrier (pragma_loc);
}
/* OpenMP 2.5:
# pragma omp critical [(name)] new-line
structured-block
OpenMP 4.5:
# pragma omp critical [(name) [hint(expression)]] new-line
LOC is the location of the #pragma itself. */
#define OMP_CRITICAL_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_HINT) )
/* Parse "#pragma omp critical [(name) [hint(expr)]]" followed by a
   structured block, and return the finished critical statement.
   LOC is the location of the pragma; IF_P is propagated to the body
   parser.  */
static tree
c_parser_omp_critical (location_t loc, c_parser *parser, bool *if_p)
{
  tree crit_name = NULL_TREE;
  tree clauses = NULL_TREE;

  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* Optional "(name)" selects a named lock.  */
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  crit_name = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>");
	}
      else
	c_parser_error (parser, "expected identifier");

      /* OpenMP 4.5 allows a hint clause after the name.  */
      clauses = c_parser_omp_all_clauses (parser,
					  OMP_CRITICAL_CLAUSE_MASK,
					  "#pragma omp critical");
    }
  else
    {
      if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
	c_parser_error (parser, "expected %<(%> or end of line");
      c_parser_skip_to_pragma_eol (parser);
    }

  tree body = c_parser_omp_structured_block (parser, if_p);
  return c_finish_omp_critical (loc, body, crit_name, clauses);
}
/* OpenMP 2.5:
# pragma omp flush flush-vars[opt] new-line
flush-vars:
( variable-list ) */
/* Parse "#pragma omp flush [(variable-list)]" and emit the flush.
   The optional variable list is parsed for syntax checking only.  */
static void
c_parser_omp_flush (c_parser *parser)
{
  location_t pragma_loc = c_parser_peek_token (parser)->location;

  c_parser_consume_pragma (parser);
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL);
  else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    c_parser_error (parser, "expected %<(%> or end of line");
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_flush (pragma_loc);
}
/* Parse the restricted form of loop statements allowed by OpenACC and OpenMP.
The real trick here is to determine the loop control variable early
so that we can push a new decl if necessary to make it private.
LOC is the location of the "acc" or "omp" in "#pragma acc" or "#pragma omp",
respectively. */
/* Parse the restricted form of loop statements allowed by OpenACC and
   OpenMP (possibly collapsed/ordered/tiled nests of canonical for
   loops), build the statement via c_finish_omp_for, and return it (or
   NULL_TREE on error).  CODE is the construct (OMP_FOR, OMP_SIMD,
   ...); CLAUSES are its clauses; CCLAUSES are the split clause sets of
   a combined construct, or NULL.  LOC is the location of the "acc" or
   "omp" token.  */
static tree
c_parser_omp_for_loop (location_t loc, c_parser *parser, enum tree_code code,
		       tree clauses, tree *cclauses, bool *if_p)
{
  tree decl, cond, incr, save_break, save_cont, body, init, stmt, cl;
  tree declv, condv, incrv, initv, ret = NULL_TREE;
  tree pre_body = NULL_TREE, this_pre_body;
  tree ordered_cl = NULL_TREE;
  bool fail = false, open_brace_parsed = false;
  int i, collapse = 1, ordered = 0, count, nbraces = 0;
  location_t for_loc;
  bool tiling = false;
  /* Scopes opened for declarations in the 2nd..Nth loop inits; popped
     at pop_scopes below.  */
  vec<tree, va_gc> *for_block = make_tree_vector ();
  /* Derive the number of associated loops from collapse/tile/ordered
     clauses.  */
  for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl))
    if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE)
      collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (cl));
    else if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_TILE)
      {
	tiling = true;
	collapse = list_length (OMP_CLAUSE_TILE_LIST (cl));
      }
    else if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_ORDERED
	     && OMP_CLAUSE_ORDERED_EXPR (cl))
      {
	ordered_cl = cl;
	ordered = tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (cl));
      }
  if (ordered && ordered < collapse)
    {
      error_at (OMP_CLAUSE_LOCATION (ordered_cl),
		"%<ordered%> clause parameter is less than %<collapse%>");
      /* Error recovery: raise ordered to collapse and continue.  */
      OMP_CLAUSE_ORDERED_EXPR (ordered_cl)
	= build_int_cst (NULL_TREE, collapse);
      ordered = collapse;
    }
  if (ordered)
    {
      /* linear may not combine with ordered(n); drop offending
	 clauses.  */
      for (tree *pc = &clauses; *pc; )
	if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_LINEAR)
	  {
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "%<linear%> clause may not be specified together "
		      "with %<ordered%> clause with a parameter");
	    *pc = OMP_CLAUSE_CHAIN (*pc);
	  }
	else
	  pc = &OMP_CLAUSE_CHAIN (*pc);
    }
  gcc_assert (tiling || (collapse >= 1 && ordered >= 0));
  count = ordered ? ordered : collapse;
  declv = make_tree_vec (count);
  initv = make_tree_vec (count);
  condv = make_tree_vec (count);
  incrv = make_tree_vec (count);
  if (!c_parser_next_token_is_keyword (parser, RID_FOR))
    {
      c_parser_error (parser, "for statement expected");
      return NULL;
    }
  for_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  /* Parse each of the COUNT perfectly-nested for loops' headers.  */
  for (i = 0; i < count; i++)
    {
      int bracecount = 0;
      matching_parens parens;
      if (!parens.require_open (parser))
	goto pop_scopes;
      /* Parse the initialization declaration or expression.  */
      if (c_parser_next_tokens_start_declaration (parser))
	{
	  if (i > 0)
	    vec_safe_push (for_block, c_begin_compound_stmt (true));
	  this_pre_body = push_stmt_list ();
	  c_parser_declaration_or_fndef (parser, true, true, true, true, true,
					 NULL, vNULL);
	  if (this_pre_body)
	    {
	      /* Accumulate any pre-body statements (e.g. from the
		 declaration) onto the combined pre_body list.  */
	      this_pre_body = pop_stmt_list (this_pre_body);
	      if (pre_body)
		{
		  tree t = pre_body;
		  pre_body = push_stmt_list ();
		  add_stmt (t);
		  add_stmt (this_pre_body);
		  pre_body = pop_stmt_list (pre_body);
		}
	      else
		pre_body = this_pre_body;
	    }
	  decl = check_for_loop_decls (for_loc, flag_isoc99);
	  if (decl == NULL)
	    goto error_init;
	  if (DECL_INITIAL (decl) == error_mark_node)
	    decl = error_mark_node;
	  init = decl;
	}
      else if (c_parser_next_token_is (parser, CPP_NAME)
	       && c_parser_peek_2nd_token (parser)->type == CPP_EQ)
	{
	  /* "var = expr" initialization of a previously declared
	     variable.  */
	  struct c_expr decl_exp;
	  struct c_expr init_exp;
	  location_t init_loc;
	  decl_exp = c_parser_postfix_expression (parser);
	  decl = decl_exp.value;
	  c_parser_require (parser, CPP_EQ, "expected %<=%>");
	  init_loc = c_parser_peek_token (parser)->location;
	  init_exp = c_parser_expr_no_commas (parser, NULL);
	  init_exp = default_function_array_read_conversion (init_loc,
							     init_exp);
	  init = build_modify_expr (init_loc, decl, decl_exp.original_type,
				    NOP_EXPR, init_loc, init_exp.value,
				    init_exp.original_type);
	  init = c_process_expr_stmt (init_loc, init);
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
	}
      else
	{
	error_init:
	  c_parser_error (parser,
			  "expected iteration declaration or initialization");
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  fail = true;
	  goto parse_next;
	}
      /* Parse the loop condition.  */
      cond = NULL_TREE;
      if (c_parser_next_token_is_not (parser, CPP_SEMICOLON))
	{
	  location_t cond_loc = c_parser_peek_token (parser)->location;
	  struct c_expr cond_expr
	    = c_parser_binary_expression (parser, NULL, NULL_TREE);
	  cond = cond_expr.value;
	  cond = c_objc_common_truthvalue_conversion (cond_loc, cond);
	  if (COMPARISON_CLASS_P (cond))
	    {
	      tree op0 = TREE_OPERAND (cond, 0), op1 = TREE_OPERAND (cond, 1);
	      op0 = c_fully_fold (op0, false, NULL);
	      op1 = c_fully_fold (op1, false, NULL);
	      TREE_OPERAND (cond, 0) = op0;
	      TREE_OPERAND (cond, 1) = op1;
	    }
	  /* Only relational comparisons are valid loop conditions in
	     the canonical form.  */
	  switch (cond_expr.original_code)
	    {
	    case GT_EXPR:
	    case GE_EXPR:
	    case LT_EXPR:
	    case LE_EXPR:
	      break;
	    default:
	      /* Can't be cond = error_mark_node, because we want to preserve
		 the location until c_finish_omp_for.  */
	      cond = build1 (NOP_EXPR, boolean_type_node, error_mark_node);
	      break;
	    }
	  protected_set_expr_location (cond, cond_loc);
	}
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      /* Parse the increment expression.  */
      incr = NULL_TREE;
      if (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN))
	{
	  location_t incr_loc = c_parser_peek_token (parser)->location;
	  incr = c_process_expr_stmt (incr_loc,
				      c_parser_expression (parser).value);
	}
      parens.skip_until_found_close (parser);
      if (decl == NULL || decl == error_mark_node || init == error_mark_node)
	fail = true;
      else
	{
	  TREE_VEC_ELT (declv, i) = decl;
	  TREE_VEC_ELT (initv, i) = init;
	  TREE_VEC_ELT (condv, i) = cond;
	  TREE_VEC_ELT (incrv, i) = incr;
	}
    parse_next:
      if (i == count - 1)
	break;
      /* FIXME: OpenMP 3.0 draft isn't very clear on what exactly is allowed
	 in between the collapsed for loops to be still considered perfectly
	 nested.  Hopefully the final version clarifies this.
	 For now handle (multiple) {'s and empty statements.  */
      do
	{
	  if (c_parser_next_token_is_keyword (parser, RID_FOR))
	    {
	      c_parser_consume_token (parser);
	      break;
	    }
	  else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	    {
	      c_parser_consume_token (parser);
	      bracecount++;
	    }
	  else if (bracecount
		   && c_parser_next_token_is (parser, CPP_SEMICOLON))
	    c_parser_consume_token (parser);
	  else
	    {
	      c_parser_error (parser, "not enough perfectly nested loops");
	      if (bracecount)
		{
		  open_brace_parsed = true;
		  bracecount--;
		}
	      fail = true;
	      count = 0;
	      break;
	    }
	}
      while (1);
      nbraces += bracecount;
    }
  if (nbraces)
    if_p = NULL;
  /* break/continue handling: redirect break to a dummy label and
     collect continue as a label appended after the body.  */
  save_break = c_break_label;
  c_break_label = size_one_node;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = push_stmt_list ();
  if (open_brace_parsed)
    {
      location_t here = c_parser_peek_token (parser)->location;
      stmt = c_begin_compound_stmt (true);
      c_parser_compound_statement_nostart (parser);
      add_stmt (c_end_compound_stmt (here, stmt, true));
    }
  else
    add_stmt (c_parser_c99_block_statement (parser, if_p));
  if (c_cont_label)
    {
      tree t = build1 (LABEL_EXPR, void_type_node, c_cont_label);
      SET_EXPR_LOCATION (t, loc);
      add_stmt (t);
    }
  body = pop_stmt_list (body);
  c_break_label = save_break;
  c_cont_label = save_cont;
  /* Consume the closing braces of the intermediate compound
     statements between the nested loops.  */
  while (nbraces)
    {
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	{
	  c_parser_consume_token (parser);
	  nbraces--;
	}
      else if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	c_parser_consume_token (parser);
      else
	{
	  c_parser_error (parser, "collapsed loops not perfectly nested");
	  while (nbraces)
	    {
	      location_t here = c_parser_peek_token (parser)->location;
	      stmt = c_begin_compound_stmt (true);
	      add_stmt (body);
	      c_parser_compound_statement_nostart (parser);
	      body = c_end_compound_stmt (here, stmt, true);
	      nbraces--;
	    }
	  goto pop_scopes;
	}
    }
  /* Only bother calling c_finish_omp_for if we haven't already generated
     an error from the initialization parsing.  */
  if (!fail)
    {
      stmt = c_finish_omp_for (loc, code, declv, NULL, initv, condv,
			       incrv, body, pre_body);
      /* Check for iterators appearing in lb, b or incr expressions.  */
      if (stmt && !c_omp_check_loop_iv (stmt, declv, NULL))
	stmt = NULL_TREE;
      if (stmt)
	{
	  add_stmt (stmt);
	  /* For combined constructs, fix up firstprivate/lastprivate
	     clauses naming the iteration variables.  */
	  if (cclauses != NULL
	      && cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] != NULL)
	    {
	      tree *c;
	      for (c = &cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; *c ; )
		if (OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_FIRSTPRIVATE
		    && OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_LASTPRIVATE)
		  c = &OMP_CLAUSE_CHAIN (*c);
		else
		  {
		    for (i = 0; i < count; i++)
		      if (TREE_VEC_ELT (declv, i) == OMP_CLAUSE_DECL (*c))
			break;
		    if (i == count)
		      c = &OMP_CLAUSE_CHAIN (*c);
		    else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE)
		      {
			error_at (loc,
				  "iteration variable %qD should not be firstprivate",
				  OMP_CLAUSE_DECL (*c));
			*c = OMP_CLAUSE_CHAIN (*c);
		      }
		    else
		      {
			/* Move lastprivate (decl) clause to OMP_FOR_CLAUSES.  */
			tree l = *c;
			*c = OMP_CLAUSE_CHAIN (*c);
			if (code == OMP_SIMD)
			  {
			    OMP_CLAUSE_CHAIN (l)
			      = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
			    cclauses[C_OMP_CLAUSE_SPLIT_FOR] = l;
			  }
			else
			  {
			    OMP_CLAUSE_CHAIN (l) = clauses;
			    clauses = l;
			  }
		      }
		  }
	    }
	  OMP_FOR_CLAUSES (stmt) = clauses;
	}
      ret = stmt;
    }
pop_scopes:
  while (!for_block->is_empty ())
    {
      /* FIXME diagnostics: LOC below should be the actual location of
	 this particular for block.  We need to build a list of
	 locations to go along with FOR_BLOCK.  */
      stmt = c_end_compound_stmt (loc, for_block->pop (), true);
      add_stmt (stmt);
    }
  release_tree_vector (for_block);
  return ret;
}
/* Helper function for OpenMP parsing, split clauses and call
finish_omp_clauses on each of the set of clauses afterwards. */
/* Split CLAUSES of a combined construct CODE into CCLAUSES and run
   c_finish_omp_clauses on each non-empty resulting set.  */
static void
omp_split_clauses (location_t loc, enum tree_code code,
		   omp_clause_mask mask, tree clauses, tree *cclauses)
{
  c_omp_split_clauses (loc, code, mask, clauses, cclauses);
  for (int idx = 0; idx < C_OMP_CLAUSE_SPLIT_COUNT; idx++)
    if (cclauses[idx] != NULL_TREE)
      cclauses[idx] = c_finish_omp_clauses (cclauses[idx], C_ORT_OMP);
}
/* OpenMP 4.0:
#pragma omp simd simd-clause[optseq] new-line
for-loop
LOC is the location of the #pragma token.
*/
#define OMP_SIMD_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SAFELEN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SIMDLEN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALIGNED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))
/* Parse "#pragma omp simd" (possibly as part of a combined construct)
   and the following for loop; return the resulting statement.
   P_NAME accumulates the construct name for diagnostics; MASK is the
   allowed-clause mask; CCLAUSES is non-NULL for combined constructs.  */
static tree
c_parser_omp_simd (location_t loc, c_parser *parser,
		   char *p_name, omp_clause_mask mask, tree *cclauses,
		   bool *if_p)
{
  strcat (p_name, " simd");
  mask |= OMP_SIMD_CLAUSE_MASK;

  tree clauses
    = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      /* Combined construct: split the clauses and keep only the simd
	 part here.  */
      omp_split_clauses (loc, OMP_SIMD, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];

      /* An ordered clause with a parameter is invalid on a combined
	 construct containing simd; diagnose and drop the parameter.  */
      tree ord = omp_find_clause (cclauses[C_OMP_CLAUSE_SPLIT_FOR],
				  OMP_CLAUSE_ORDERED);
      if (ord && OMP_CLAUSE_ORDERED_EXPR (ord))
	{
	  error_at (OMP_CLAUSE_LOCATION (ord),
		    "%<ordered%> clause with parameter may not be specified "
		    "on %qs construct", p_name);
	  OMP_CLAUSE_ORDERED_EXPR (ord) = NULL_TREE;
	}
    }

  tree sb = c_begin_compound_stmt (true);
  tree ret = c_parser_omp_for_loop (loc, parser, OMP_SIMD, clauses, cclauses,
				    if_p);
  sb = c_end_compound_stmt (loc, sb, true);
  add_stmt (sb);
  return ret;
}
/* OpenMP 2.5:
#pragma omp for for-clause[optseq] new-line
for-loop
OpenMP 4.0:
#pragma omp for simd for-simd-clause[optseq] new-line
for-loop
LOC is the location of the #pragma token.
*/
#define OMP_FOR_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDERED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
/* Parse "#pragma omp for [simd]" (possibly as part of a combined
   construct) and the following for loop; return the resulting
   statement or NULL_TREE.  P_NAME accumulates the construct name for
   diagnostics; MASK is the allowed-clause mask; CCLAUSES is non-NULL
   when this is part of a combined construct.  */
static tree
c_parser_omp_for (location_t loc, c_parser *parser,
		  char *p_name, omp_clause_mask mask, tree *cclauses,
		  bool *if_p)
{
  tree block, clauses, ret;
  strcat (p_name, " for");
  mask |= OMP_FOR_CLAUSE_MASK;
  /* parallel for{, simd} disallows nowait clause, but for
     target {teams distribute ,}parallel for{, simd} it should be accepted.  */
  if (cclauses && (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT);
  /* Composite distribute parallel for{, simd} disallows ordered clause.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDERED);
  /* Handle the "for simd" combined form by delegating to the simd
     parser and wrapping the result in an OMP_FOR node.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "simd") == 0)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;
	  c_parser_consume_token (parser);
	  if (!flag_openmp)  /* flag_openmp_simd  */
	    return c_parser_omp_simd (loc, parser, p_name, mask, cclauses,
				      if_p);
	  block = c_begin_compound_stmt (true);
	  ret = c_parser_omp_simd (loc, parser, p_name, mask, cclauses, if_p);
	  block = c_end_compound_stmt (loc, block, true);
	  if (ret == NULL_TREE)
	    return ret;
	  ret = make_node (OMP_FOR);
	  TREE_TYPE (ret) = void_type_node;
	  OMP_FOR_BODY (ret) = block;
	  OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
	  SET_EXPR_LOCATION (ret, loc);
	  add_stmt (ret);
	  return ret;
	}
    }
  /* -fopenmp-simd only: a plain "for" construct is ignored.  */
  if (!flag_openmp)  /* flag_openmp_simd  */
    {
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }
  /* Composite distribute parallel for disallows linear clause.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR);
  clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_FOR, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
    }
  block = c_begin_compound_stmt (true);
  ret = c_parser_omp_for_loop (loc, parser, OMP_FOR, clauses, cclauses, if_p);
  block = c_end_compound_stmt (loc, block, true);
  add_stmt (block);
  return ret;
}
/* OpenMP 2.5:
# pragma omp master new-line
structured-block
LOC is the location of the #pragma token.
*/
/* Parse "#pragma omp master" followed by a structured block and return
   the finished master statement.  LOC is the location of the pragma.  */
static tree
c_parser_omp_master (location_t loc, c_parser *parser, bool *if_p)
{
  c_parser_skip_to_pragma_eol (parser);
  tree body = c_parser_omp_structured_block (parser, if_p);
  return c_finish_omp_master (loc, body);
}
/* OpenMP 2.5:
# pragma omp ordered new-line
structured-block
OpenMP 4.5:
# pragma omp ordered ordered-clauses new-line
structured-block
# pragma omp ordered depend-clauses new-line */
#define OMP_ORDERED_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_THREADS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SIMD))
#define OMP_ORDERED_DEPEND_CLAUSE_MASK \
(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND)
/* Parse "#pragma omp ordered", either the depend-clause form (no body)
   or the structured-block form.  Returns true iff a structured block
   was parsed (so the caller knows whether a body statement was
   consumed); all error and depend paths return false.  */
static bool
c_parser_omp_ordered (c_parser *parser, enum pragma_context context,
		      bool *if_p)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  /* ordered is a statement-level construct only.  */
  if (context != pragma_stmt && context != pragma_compound)
    {
      c_parser_error (parser, "expected declaration specifiers");
      c_parser_skip_to_pragma_eol (parser, false);
      return false;
    }
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (!strcmp ("depend", p))
	{
	  /* Stand-alone depend form: no structured block follows.  */
	  if (!flag_openmp)	/* flag_openmp_simd */
	    {
	      c_parser_skip_to_pragma_eol (parser, false);
	      return false;
	    }
	  if (context == pragma_stmt)
	    {
	      error_at (loc,
			"%<#pragma omp ordered%> with %<depend%> clause may "
			"only be used in compound statements");
	      c_parser_skip_to_pragma_eol (parser, false);
	      return false;
	    }
	  tree clauses
	    = c_parser_omp_all_clauses (parser,
					OMP_ORDERED_DEPEND_CLAUSE_MASK,
					"#pragma omp ordered");
	  c_finish_omp_ordered (loc, clauses, NULL_TREE);
	  return false;
	}
    }
  tree clauses = c_parser_omp_all_clauses (parser, OMP_ORDERED_CLAUSE_MASK,
					   "#pragma omp ordered");
  /* Under -fopenmp-simd only ordered simd is honored.  */
  if (!flag_openmp /* flag_openmp_simd */
      && omp_find_clause (clauses, OMP_CLAUSE_SIMD) == NULL_TREE)
    return false;
  c_finish_omp_ordered (loc, clauses,
			c_parser_omp_structured_block (parser, if_p));
  return true;
}
/* OpenMP 2.5:
section-scope:
{ section-sequence }
section-sequence:
section-directive[opt] structured-block
section-sequence section-directive structured-block
SECTIONS_LOC is the location of the #pragma omp sections. */
/* Parse the brace-enclosed section-sequence of an OpenMP sections
   construct (see grammar above) and return the OMP_SECTIONS statement,
   or NULL_TREE if the opening brace is missing.  SECTIONS_LOC is the
   location of the "#pragma omp sections".  */
static tree
c_parser_omp_sections_scope (location_t sections_loc, c_parser *parser)
{
  tree stmt, substmt;
  /* Suppresses repeated "expected #pragma omp section" diagnostics
     until the next valid section pragma.  */
  bool error_suppress = false;
  location_t loc;
  loc = c_parser_peek_token (parser)->location;
  if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
    {
      /* Avoid skipping until the end of the block.  */
      parser->error = false;
      return NULL_TREE;
    }
  stmt = push_stmt_list ();
  /* The first section may omit its "#pragma omp section".  */
  if (c_parser_peek_token (parser)->pragma_kind != PRAGMA_OMP_SECTION)
    {
      substmt = c_parser_omp_structured_block (parser, NULL);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      SET_EXPR_LOCATION (substmt, loc);
      add_stmt (substmt);
    }
  while (1)
    {
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	break;
      if (c_parser_next_token_is (parser, CPP_EOF))
	break;
      loc = c_parser_peek_token (parser)->location;
      if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION)
	{
	  c_parser_consume_pragma (parser);
	  c_parser_skip_to_pragma_eol (parser);
	  error_suppress = false;
	}
      else if (!error_suppress)
	{
	  error_at (loc, "expected %<#pragma omp section%> or %<}%>");
	  error_suppress = true;
	}
      substmt = c_parser_omp_structured_block (parser, NULL);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      SET_EXPR_LOCATION (substmt, loc);
      add_stmt (substmt);
    }
  c_parser_skip_until_found (parser, CPP_CLOSE_BRACE,
			     "expected %<#pragma omp section%> or %<}%>");
  substmt = pop_stmt_list (stmt);
  stmt = make_node (OMP_SECTIONS);
  SET_EXPR_LOCATION (stmt, sections_loc);
  TREE_TYPE (stmt) = void_type_node;
  OMP_SECTIONS_BODY (stmt) = substmt;
  return add_stmt (stmt);
}
/* OpenMP 2.5:
# pragma omp sections sections-clause[optseq] newline
sections-scope
LOC is the location of the #pragma token.
*/
#define OMP_SECTIONS_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
/* Parse "#pragma omp sections" (possibly as part of a combined
   construct) and its section-scope; return the resulting statement
   or NULL_TREE.  P_NAME accumulates the construct name for
   diagnostics; MASK is the allowed-clause mask; CCLAUSES is non-NULL
   for combined constructs.  */
static tree
c_parser_omp_sections (location_t loc, c_parser *parser,
		       char *p_name, omp_clause_mask mask, tree *cclauses)
{
  strcat (p_name, " sections");
  mask |= OMP_SECTIONS_CLAUSE_MASK;
  /* In a combined construct the enclosing construct owns "nowait".  */
  if (cclauses)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT);

  tree clauses
    = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_SECTIONS, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS];
    }

  tree sb = c_begin_compound_stmt (true);
  tree ret = c_parser_omp_sections_scope (loc, parser);
  if (ret)
    OMP_SECTIONS_CLAUSES (ret) = clauses;
  sb = c_end_compound_stmt (loc, sb, true);
  add_stmt (sb);
  return ret;
}
/* OpenMP 2.5:
# pragma omp parallel parallel-clause[optseq] new-line
structured-block
# pragma omp parallel for parallel-for-clause[optseq] new-line
structured-block
# pragma omp parallel sections parallel-sections-clause[optseq] new-line
structured-block
OpenMP 4.0:
# pragma omp parallel for simd parallel-for-simd-clause[optseq] new-line
structured-block
LOC is the location of the #pragma token.
*/
#define OMP_PARALLEL_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PROC_BIND))
/* Parse "#pragma omp parallel" and its combined forms "parallel for",
   "parallel for simd" and "parallel sections".  P_NAME accumulates the
   directive name for diagnostics; MASK is the set of clauses accepted
   so far; CCLAUSES is non-NULL when parallel is itself part of an
   enclosing combined construct (e.g. target parallel) and receives the
   split clauses; IF_P reports a possibly-ambiguous trailing if body.
   Returns the built statement, or NULL_TREE on error.  */
static tree
c_parser_omp_parallel (location_t loc, c_parser *parser,
		       char *p_name, omp_clause_mask mask, tree *cclauses,
		       bool *if_p)
{
  tree stmt, clauses, block;
  strcat (p_name, " parallel");
  mask |= OMP_PARALLEL_CLAUSE_MASK;
  /* #pragma omp target parallel{, for, for simd} disallow copyin clause.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0
      && (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN);
  /* Combined "parallel for [simd]".  */
  if (c_parser_next_token_is_keyword (parser, RID_FOR))
    {
      tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
      if (cclauses == NULL)
	cclauses = cclauses_buf;
      c_parser_consume_token (parser);
      /* With -fopenmp-simd only, just parse the inner for; no
	 OMP_PARALLEL is built.  */
      if (!flag_openmp)  /* flag_openmp_simd  */
	return c_parser_omp_for (loc, parser, p_name, mask, cclauses, if_p);
      block = c_begin_omp_parallel ();
      tree ret = c_parser_omp_for (loc, parser, p_name, mask, cclauses, if_p);
      /* Finish the parallel region even when the inner for failed, so
	 the binding contour opened above is closed either way.  */
      stmt
	= c_finish_omp_parallel (loc, cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL],
				 block);
      if (ret == NULL_TREE)
	return ret;
      OMP_PARALLEL_COMBINED (stmt) = 1;
      return stmt;
    }
  /* When combined with distribute, parallel has to be followed by for.
     #pragma omp target parallel is allowed though.  */
  else if (cclauses
	   && (mask & (OMP_CLAUSE_MASK_1
		       << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
    {
      error_at (loc, "expected %<for%> after %qs", p_name);
      c_parser_skip_to_pragma_eol (parser);
      return NULL_TREE;
    }
  else if (!flag_openmp)  /* flag_openmp_simd  */
    {
      /* Plain parallel is ignored entirely under -fopenmp-simd.  */
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }
  else if (cclauses == NULL && c_parser_next_token_is (parser, CPP_NAME))
    {
      /* Combined "parallel sections" — only valid when parallel is the
	 outermost construct (cclauses == NULL).  */
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "sections") == 0)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;
	  c_parser_consume_token (parser);
	  block = c_begin_omp_parallel ();
	  c_parser_omp_sections (loc, parser, p_name, mask, cclauses);
	  stmt = c_finish_omp_parallel (loc,
					cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL],
					block);
	  OMP_PARALLEL_COMBINED (stmt) = 1;
	  return stmt;
	}
    }
  /* Plain "#pragma omp parallel" with a structured block body.  */
  clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_PARALLEL, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
    }
  block = c_begin_omp_parallel ();
  c_parser_statement (parser, if_p);
  stmt = c_finish_omp_parallel (loc, clauses, block);
  return stmt;
}
/* OpenMP 2.5:
# pragma omp single single-clause[optseq] new-line
structured-block
LOC is the location of the #pragma.
*/
#define OMP_SINGLE_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
/* Parse "#pragma omp single": its clause list followed by a structured
   block, wrapped into an OMP_SINGLE statement at LOC.  */
static tree
c_parser_omp_single (location_t loc, c_parser *parser, bool *if_p)
{
  /* Parse the pieces first, then assemble the node.  */
  tree clauses = c_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK,
					   "#pragma omp single");
  tree body = c_parser_omp_structured_block (parser, if_p);

  tree single_stmt = make_node (OMP_SINGLE);
  TREE_TYPE (single_stmt) = void_type_node;
  OMP_SINGLE_CLAUSES (single_stmt) = clauses;
  OMP_SINGLE_BODY (single_stmt) = body;
  SET_EXPR_LOCATION (single_stmt, loc);
  return add_stmt (single_stmt);
}
/* OpenMP 3.0:
# pragma omp task task-clause[optseq] new-line
LOC is the location of the #pragma.
*/
#define OMP_TASK_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNTIED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FINAL) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY))
/* Parse "#pragma omp task": the clause list followed by the task body
   statement.  c_begin_omp_task/c_finish_omp_task bracket the body so
   that the task region gets its own binding contour.  */
static tree
c_parser_omp_task (location_t loc, c_parser *parser, bool *if_p)
{
  tree clause_list = c_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK,
					       "#pragma omp task");
  tree task_block = c_begin_omp_task ();
  c_parser_statement (parser, if_p);
  return c_finish_omp_task (loc, clause_list, task_block);
}
/* OpenMP 3.0:
# pragma omp taskwait new-line
*/
/* Parse "#pragma omp taskwait" — a standalone directive with no
   clauses and no body.  */
static void
c_parser_omp_taskwait (c_parser *parser)
{
  /* Record the pragma's location before consuming it.  */
  location_t pragma_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_taskwait (pragma_loc);
}
/* OpenMP 3.1:
# pragma omp taskyield new-line
*/
/* Parse "#pragma omp taskyield" — a standalone directive with no
   clauses and no body.  */
static void
c_parser_omp_taskyield (c_parser *parser)
{
  /* Record the pragma's location before consuming it.  */
  location_t pragma_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_taskyield (pragma_loc);
}
/* OpenMP 4.0:
# pragma omp taskgroup new-line
*/
/* Parse "#pragma omp taskgroup" followed by a structured block.  The
   pragma token itself has already been consumed by the caller.  */
static tree
c_parser_omp_taskgroup (c_parser *parser, bool *if_p)
{
  location_t pragma_loc = c_parser_peek_token (parser)->location;
  c_parser_skip_to_pragma_eol (parser);

  tree body = c_parser_omp_structured_block (parser, if_p);
  return c_finish_omp_taskgroup (pragma_loc, body);
}
/* OpenMP 4.0:
# pragma omp cancel cancel-clause[optseq] new-line
LOC is the location of the #pragma.
*/
#define OMP_CANCEL_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PARALLEL) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FOR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SECTIONS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASKGROUP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF))
/* Parse "#pragma omp cancel" with its construct-type-clause and
   optional if clause; semantic checking and code generation are done
   by c_finish_omp_cancel.  */
static void
c_parser_omp_cancel (c_parser *parser)
{
  location_t pragma_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);

  tree clause_list
    = c_parser_omp_all_clauses (parser, OMP_CANCEL_CLAUSE_MASK,
				"#pragma omp cancel");
  c_finish_omp_cancel (pragma_loc, clause_list);
}
/* OpenMP 4.0:
# pragma omp cancellation point cancelpt-clause[optseq] new-line
LOC is the location of the #pragma.
*/
#define OMP_CANCELLATION_POINT_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PARALLEL) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FOR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SECTIONS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASKGROUP))
/* Parse "#pragma omp cancellation point" with its required
   construct-type clause.  The directive is a standalone statement and
   is only accepted in compound-statement context.  */
static void
c_parser_omp_cancellation_point (c_parser *parser, enum pragma_context context)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree clauses;
  bool point_seen = false;
  c_parser_consume_pragma (parser);
  /* The identifier "point" must follow "cancellation".  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "point") == 0)
	{
	  c_parser_consume_token (parser);
	  point_seen = true;
	}
    }
  if (!point_seen)
    {
      c_parser_error (parser, "expected %<point%>");
      c_parser_skip_to_pragma_eol (parser);
      return;
    }
  if (context != pragma_compound)
    {
      /* pragma_stmt means statement context (e.g. the body of an if);
	 any other non-compound context is a declaration context.  */
      if (context == pragma_stmt)
	error_at (loc,
		  "%<#pragma %s%> may only be used in compound statements",
		  "omp cancellation point");
      else
	c_parser_error (parser, "expected declaration specifiers");
      c_parser_skip_to_pragma_eol (parser, false);
      return;
    }
  clauses
    = c_parser_omp_all_clauses (parser, OMP_CANCELLATION_POINT_CLAUSE_MASK,
				"#pragma omp cancellation point");
  c_finish_omp_cancellation_point (loc, clauses);
}
/* OpenMP 4.0:
#pragma omp distribute distribute-clause[optseq] new-line
for-loop */
#define OMP_DISTRIBUTE_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)\
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))
/* Parse "#pragma omp distribute" and the combined forms
   "distribute simd" and "distribute parallel for[ simd]".  P_NAME,
   MASK and CCLAUSES are as for the other combined-construct parsers;
   CCLAUSES non-NULL means distribute is itself nested in a combined
   construct (e.g. teams distribute).  */
static tree
c_parser_omp_distribute (location_t loc, c_parser *parser,
			 char *p_name, omp_clause_mask mask, tree *cclauses,
			 bool *if_p)
{
  tree clauses, block, ret;
  strcat (p_name, " distribute");
  mask |= OMP_DISTRIBUTE_CLAUSE_MASK;
  /* Look for a composing "simd" or "parallel" keyword.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      bool simd = false;
      bool parallel = false;
      if (strcmp (p, "simd") == 0)
	simd = true;
      else
	parallel = strcmp (p, "parallel") == 0;
      if (parallel || simd)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;
	  c_parser_consume_token (parser);
	  /* With -fopenmp-simd only, parse just the inner construct;
	     no OMP_DISTRIBUTE node is built.  */
	  if (!flag_openmp)  /* flag_openmp_simd  */
	    {
	      if (simd)
		return c_parser_omp_simd (loc, parser, p_name, mask, cclauses,
					  if_p);
	      else
		return c_parser_omp_parallel (loc, parser, p_name, mask,
					      cclauses, if_p);
	    }
	  block = c_begin_compound_stmt (true);
	  if (simd)
	    ret = c_parser_omp_simd (loc, parser, p_name, mask, cclauses,
				     if_p);
	  else
	    ret = c_parser_omp_parallel (loc, parser, p_name, mask, cclauses,
					 if_p);
	  block = c_end_compound_stmt (loc, block, true);
	  if (ret == NULL)
	    return ret;
	  /* Wrap the inner construct in an OMP_DISTRIBUTE node carrying
	     the clauses split out for the distribute level.  Note that
	     OMP_DISTRIBUTE reuses the OMP_FOR_* accessors.  */
	  ret = make_node (OMP_DISTRIBUTE);
	  TREE_TYPE (ret) = void_type_node;
	  OMP_FOR_BODY (ret) = block;
	  OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
	  SET_EXPR_LOCATION (ret, loc);
	  add_stmt (ret);
	  return ret;
	}
    }
  /* Plain distribute is ignored entirely under -fopenmp-simd.  */
  if (!flag_openmp)  /* flag_openmp_simd  */
    {
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }
  clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_DISTRIBUTE, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
    }
  block = c_begin_compound_stmt (true);
  ret = c_parser_omp_for_loop (loc, parser, OMP_DISTRIBUTE, clauses, NULL,
			       if_p);
  block = c_end_compound_stmt (loc, block, true);
  add_stmt (block);
  return ret;
}
/* OpenMP 4.0:
# pragma omp teams teams-clause[optseq] new-line
structured-block */
#define OMP_TEAMS_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_THREAD_LIMIT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT))
/* Parse "#pragma omp teams" and the combined form "teams distribute...".
   CCLAUSES is non-NULL when teams is nested in a combined construct
   (e.g. target teams), in which case the parsed clauses are split.  */
static tree
c_parser_omp_teams (location_t loc, c_parser *parser,
		    char *p_name, omp_clause_mask mask, tree *cclauses,
		    bool *if_p)
{
  tree clauses, block, ret;
  strcat (p_name, " teams");
  mask |= OMP_TEAMS_CLAUSE_MASK;
  /* Combined "teams distribute ...".  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "distribute") == 0)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;
	  c_parser_consume_token (parser);
	  /* With -fopenmp-simd only, parse just the inner construct;
	     no OMP_TEAMS node is built.  */
	  if (!flag_openmp)  /* flag_openmp_simd  */
	    return c_parser_omp_distribute (loc, parser, p_name, mask,
					    cclauses, if_p);
	  block = c_begin_compound_stmt (true);
	  ret = c_parser_omp_distribute (loc, parser, p_name, mask, cclauses,
					 if_p);
	  block = c_end_compound_stmt (loc, block, true);
	  if (ret == NULL)
	    return ret;
	  /* Wrap the distribute in an OMP_TEAMS node carrying the
	     clauses split out for the teams level, and mark it as part
	     of a combined construct.  */
	  clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	  ret = make_node (OMP_TEAMS);
	  TREE_TYPE (ret) = void_type_node;
	  OMP_TEAMS_CLAUSES (ret) = clauses;
	  OMP_TEAMS_BODY (ret) = block;
	  OMP_TEAMS_COMBINED (ret) = 1;
	  return add_stmt (ret);
	}
    }
  /* Plain teams is ignored entirely under -fopenmp-simd.  */
  if (!flag_openmp)  /* flag_openmp_simd  */
    {
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }
  clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_TEAMS, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
    }
  tree stmt = make_node (OMP_TEAMS);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TEAMS_CLAUSES (stmt) = clauses;
  OMP_TEAMS_BODY (stmt) = c_parser_omp_structured_block (parser, if_p);
  return add_stmt (stmt);
}
/* OpenMP 4.0:
# pragma omp target data target-data-clause[optseq] new-line
structured-block */
#define OMP_TARGET_DATA_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_USE_DEVICE_PTR))
/* Parse "#pragma omp target data" and its structured block.  At least
   one map clause with a data-movement or alloc map-kind is required;
   map clauses with other kinds are diagnosed and removed.  */
static tree
c_parser_omp_target_data (location_t loc, c_parser *parser, bool *if_p)
{
  tree clauses
    = c_parser_omp_all_clauses (parser, OMP_TARGET_DATA_CLAUSE_MASK,
				"#pragma omp target data");
  /* map_seen encoding: bit 0 set when any map clause was seen (valid
     or not), bit 1 set only via the value 3 when a valid one was seen.
     Thus 0 = no map clause, 1 = only invalid ones, 3 = at least one
     valid map clause.  */
  int map_seen = 0;
  for (tree *pc = &clauses; *pc;)
    {
      if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_MAP)
	switch (OMP_CLAUSE_MAP_KIND (*pc))
	  {
	  case GOMP_MAP_TO:
	  case GOMP_MAP_ALWAYS_TO:
	  case GOMP_MAP_FROM:
	  case GOMP_MAP_ALWAYS_FROM:
	  case GOMP_MAP_TOFROM:
	  case GOMP_MAP_ALWAYS_TOFROM:
	  case GOMP_MAP_ALLOC:
	    map_seen = 3;
	    break;
	  case GOMP_MAP_FIRSTPRIVATE_POINTER:
	  case GOMP_MAP_ALWAYS_POINTER:
	    /* Compiler-generated helper map kinds; neither counted nor
	       diagnosed.  */
	    break;
	  default:
	    map_seen |= 1;
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "%<#pragma omp target data%> with map-type other "
		      "than %<to%>, %<from%>, %<tofrom%> or %<alloc%> "
		      "on %<map%> clause");
	    /* Unlink the offending clause and continue without
	       advancing PC.  */
	    *pc = OMP_CLAUSE_CHAIN (*pc);
	    continue;
	  }
      pc = &OMP_CLAUSE_CHAIN (*pc);
    }
  if (map_seen != 3)
    {
      /* Only emit the "at least one map" error when no map clause at
	 all was seen; otherwise an error was already reported above.  */
      if (map_seen == 0)
	error_at (loc,
		  "%<#pragma omp target data%> must contain at least "
		  "one %<map%> clause");
      return NULL_TREE;
    }
  tree stmt = make_node (OMP_TARGET_DATA);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TARGET_DATA_CLAUSES (stmt) = clauses;
  keep_next_level ();
  tree block = c_begin_compound_stmt (true);
  add_stmt (c_parser_omp_structured_block (parser, if_p));
  OMP_TARGET_DATA_BODY (stmt) = c_end_compound_stmt (loc, block, true);
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}
/* OpenMP 4.0:
# pragma omp target update target-update-clause[optseq] new-line */
#define OMP_TARGET_UPDATE_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FROM) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TO) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
/* Parse "#pragma omp target update" — a standalone directive requiring
   at least one to/from motion clause.  Always returns false, since no
   statement usable by the caller is produced.  */
static bool
c_parser_omp_target_update (location_t loc, c_parser *parser,
			    enum pragma_context context)
{
  /* Standalone directives are not valid in plain statement context.  */
  if (context == pragma_stmt)
    {
      error_at (loc, "%<#pragma %s%> may only be used in compound statements",
		"omp target update");
      c_parser_skip_to_pragma_eol (parser, false);
      return false;
    }

  tree clause_list
    = c_parser_omp_all_clauses (parser, OMP_TARGET_UPDATE_CLAUSE_MASK,
				"#pragma omp target update");
  /* The directive is meaningless without a motion clause.  */
  bool has_to = omp_find_clause (clause_list, OMP_CLAUSE_TO) != NULL_TREE;
  bool has_from = omp_find_clause (clause_list, OMP_CLAUSE_FROM) != NULL_TREE;
  if (!has_to && !has_from)
    {
      error_at (loc,
		"%<#pragma omp target update%> must contain at least one "
		"%<from%> or %<to%> clauses");
      return false;
    }

  tree update_stmt = make_node (OMP_TARGET_UPDATE);
  TREE_TYPE (update_stmt) = void_type_node;
  OMP_TARGET_UPDATE_CLAUSES (update_stmt) = clause_list;
  SET_EXPR_LOCATION (update_stmt, loc);
  add_stmt (update_stmt);
  return false;
}
/* OpenMP 4.5:
# pragma omp target enter data target-data-clause[optseq] new-line */
#define OMP_TARGET_ENTER_DATA_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
/* Parse "#pragma omp target enter data" — a standalone directive.  At
   least one map clause with map-kind to or alloc is required; other
   map kinds are diagnosed and removed.  */
static tree
c_parser_omp_target_enter_data (location_t loc, c_parser *parser,
				enum pragma_context context)
{
  /* The identifier "data" must follow "enter" (already consumed by the
     caller).  */
  bool data_seen = false;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "data") == 0)
	{
	  c_parser_consume_token (parser);
	  data_seen = true;
	}
    }
  if (!data_seen)
    {
      c_parser_error (parser, "expected %<data%>");
      c_parser_skip_to_pragma_eol (parser);
      return NULL_TREE;
    }
  /* Standalone directives are not valid in plain statement context.  */
  if (context == pragma_stmt)
    {
      error_at (loc, "%<#pragma %s%> may only be used in compound statements",
		"omp target enter data");
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }
  tree clauses
    = c_parser_omp_all_clauses (parser, OMP_TARGET_ENTER_DATA_CLAUSE_MASK,
				"#pragma omp target enter data");
  /* map_seen: 0 = no map clause, 1 = only invalid ones, 3 = at least
     one valid map clause (see c_parser_omp_target_data).  */
  int map_seen = 0;
  for (tree *pc = &clauses; *pc;)
    {
      if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_MAP)
	switch (OMP_CLAUSE_MAP_KIND (*pc))
	  {
	  case GOMP_MAP_TO:
	  case GOMP_MAP_ALWAYS_TO:
	  case GOMP_MAP_ALLOC:
	    map_seen = 3;
	    break;
	  case GOMP_MAP_FIRSTPRIVATE_POINTER:
	  case GOMP_MAP_ALWAYS_POINTER:
	    /* Compiler-generated helper map kinds; skipped silently.  */
	    break;
	  default:
	    map_seen |= 1;
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "%<#pragma omp target enter data%> with map-type other "
		      "than %<to%> or %<alloc%> on %<map%> clause");
	    /* Unlink the offending clause; PC is not advanced.  */
	    *pc = OMP_CLAUSE_CHAIN (*pc);
	    continue;
	  }
      pc = &OMP_CLAUSE_CHAIN (*pc);
    }
  if (map_seen != 3)
    {
      if (map_seen == 0)
	error_at (loc,
		  "%<#pragma omp target enter data%> must contain at least "
		  "one %<map%> clause");
      return NULL_TREE;
    }
  tree stmt = make_node (OMP_TARGET_ENTER_DATA);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TARGET_ENTER_DATA_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);
  return stmt;
}
/* OpenMP 4.5:
# pragma omp target exit data target-data-clause[optseq] new-line */
#define OMP_TARGET_EXIT_DATA_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
/* Parse "#pragma omp target exit data" — a standalone directive.  At
   least one map clause with map-kind from, release or delete is
   required; other map kinds are diagnosed and removed.  */
static tree
c_parser_omp_target_exit_data (location_t loc, c_parser *parser,
			       enum pragma_context context)
{
  /* The identifier "data" must follow "exit" (already consumed by the
     caller).  */
  bool data_seen = false;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "data") == 0)
	{
	  c_parser_consume_token (parser);
	  data_seen = true;
	}
    }
  if (!data_seen)
    {
      c_parser_error (parser, "expected %<data%>");
      c_parser_skip_to_pragma_eol (parser);
      return NULL_TREE;
    }
  /* Standalone directives are not valid in plain statement context.  */
  if (context == pragma_stmt)
    {
      error_at (loc, "%<#pragma %s%> may only be used in compound statements",
		"omp target exit data");
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }
  tree clauses
    = c_parser_omp_all_clauses (parser, OMP_TARGET_EXIT_DATA_CLAUSE_MASK,
				"#pragma omp target exit data");
  /* map_seen: 0 = no map clause, 1 = only invalid ones, 3 = at least
     one valid map clause (see c_parser_omp_target_data).  */
  int map_seen = 0;
  for (tree *pc = &clauses; *pc;)
    {
      if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_MAP)
	switch (OMP_CLAUSE_MAP_KIND (*pc))
	  {
	  case GOMP_MAP_FROM:
	  case GOMP_MAP_ALWAYS_FROM:
	  case GOMP_MAP_RELEASE:
	  case GOMP_MAP_DELETE:
	    map_seen = 3;
	    break;
	  case GOMP_MAP_FIRSTPRIVATE_POINTER:
	  case GOMP_MAP_ALWAYS_POINTER:
	    /* Compiler-generated helper map kinds; skipped silently.  */
	    break;
	  default:
	    map_seen |= 1;
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "%<#pragma omp target exit data%> with map-type other "
		      "than %<from%>, %<release%> or %<delete%> on %<map%>"
		      " clause");
	    /* Unlink the offending clause; PC is not advanced.  */
	    *pc = OMP_CLAUSE_CHAIN (*pc);
	    continue;
	  }
      pc = &OMP_CLAUSE_CHAIN (*pc);
    }
  if (map_seen != 3)
    {
      if (map_seen == 0)
	error_at (loc,
		  "%<#pragma omp target exit data%> must contain at least one "
		  "%<map%> clause");
      return NULL_TREE;
    }
  tree stmt = make_node (OMP_TARGET_EXIT_DATA);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TARGET_EXIT_DATA_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);
  return stmt;
}
/* OpenMP 4.0:
# pragma omp target target-clause[optseq] new-line
structured-block */
#define OMP_TARGET_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULTMAP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR))
/* Parse "#pragma omp target" and dispatch its many variants: the
   combined constructs "target teams...", "target parallel..." and
   "target simd", the related directives "target data", "target
   enter/exit data" and "target update", and plain target with a
   structured block.  Returns true when a statement was produced.  */
static bool
c_parser_omp_target (c_parser *parser, enum pragma_context context, bool *if_p)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  tree *pc = NULL, stmt, block;
  if (context != pragma_stmt && context != pragma_compound)
    {
      c_parser_error (parser, "expected declaration specifiers");
      c_parser_skip_to_pragma_eol (parser);
      return false;
    }
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      /* CCODE identifies the combined construct following "target",
	 if any.  */
      enum tree_code ccode = ERROR_MARK;
      if (strcmp (p, "teams") == 0)
	ccode = OMP_TEAMS;
      else if (strcmp (p, "parallel") == 0)
	ccode = OMP_PARALLEL;
      else if (strcmp (p, "simd") == 0)
	ccode = OMP_SIMD;
      if (ccode != ERROR_MARK)
	{
	  tree cclauses[C_OMP_CLAUSE_SPLIT_COUNT];
	  /* P_NAME must be large enough for the longest combined
	     directive name built up by the nested parsers.  */
	  char p_name[sizeof ("#pragma omp target teams distribute "
			      "parallel for simd")];
	  c_parser_consume_token (parser);
	  strcpy (p_name, "#pragma omp target");
	  if (!flag_openmp)  /* flag_openmp_simd  */
	    {
	      /* With -fopenmp-simd only, just parse the inner
		 construct; no OMP_TARGET node is built.  */
	      tree stmt;
	      switch (ccode)
		{
		case OMP_TEAMS:
		  stmt = c_parser_omp_teams (loc, parser, p_name,
					     OMP_TARGET_CLAUSE_MASK,
					     cclauses, if_p);
		  break;
		case OMP_PARALLEL:
		  stmt = c_parser_omp_parallel (loc, parser, p_name,
						OMP_TARGET_CLAUSE_MASK,
						cclauses, if_p);
		  break;
		case OMP_SIMD:
		  stmt = c_parser_omp_simd (loc, parser, p_name,
					    OMP_TARGET_CLAUSE_MASK,
					    cclauses, if_p);
		  break;
		default:
		  gcc_unreachable ();
		}
	      return stmt != NULL_TREE;
	    }
	  keep_next_level ();
	  tree block = c_begin_compound_stmt (true), ret;
	  switch (ccode)
	    {
	    case OMP_TEAMS:
	      ret = c_parser_omp_teams (loc, parser, p_name,
					OMP_TARGET_CLAUSE_MASK, cclauses,
					if_p);
	      break;
	    case OMP_PARALLEL:
	      ret = c_parser_omp_parallel (loc, parser, p_name,
					   OMP_TARGET_CLAUSE_MASK, cclauses,
					   if_p);
	      break;
	    case OMP_SIMD:
	      ret = c_parser_omp_simd (loc, parser, p_name,
				       OMP_TARGET_CLAUSE_MASK, cclauses,
				       if_p);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  block = c_end_compound_stmt (loc, block, true);
	  if (ret == NULL_TREE)
	    return false;
	  if (ccode == OMP_TEAMS)
	    {
	      /* For combined target teams, ensure the num_teams and
		 thread_limit clause expressions are evaluated on the host,
		 before entering the target construct.  */
	      tree c;
	      for (c = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
		   c; c = OMP_CLAUSE_CHAIN (c))
		if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_TEAMS
		     || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREAD_LIMIT)
		    && TREE_CODE (OMP_CLAUSE_OPERAND (c, 0)) != INTEGER_CST)
		  {
		    /* Evaluate the expression into a host-side
		       temporary and firstprivatize that temporary on
		       the target.  */
		    tree expr = OMP_CLAUSE_OPERAND (c, 0);
		    tree tmp = create_tmp_var_raw (TREE_TYPE (expr));
		    expr = build4 (TARGET_EXPR, TREE_TYPE (expr), tmp,
				   expr, NULL_TREE, NULL_TREE);
		    add_stmt (expr);
		    OMP_CLAUSE_OPERAND (c, 0) = expr;
		    tree tc = build_omp_clause (OMP_CLAUSE_LOCATION (c),
						OMP_CLAUSE_FIRSTPRIVATE);
		    OMP_CLAUSE_DECL (tc) = tmp;
		    OMP_CLAUSE_CHAIN (tc)
		      = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
		    cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = tc;
		  }
	    }
	  tree stmt = make_node (OMP_TARGET);
	  TREE_TYPE (stmt) = void_type_node;
	  OMP_TARGET_CLAUSES (stmt) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
	  OMP_TARGET_BODY (stmt) = block;
	  OMP_TARGET_COMBINED (stmt) = 1;
	  add_stmt (stmt);
	  /* Validate the target-level map clauses via the shared tail
	     below.  */
	  pc = &OMP_TARGET_CLAUSES (stmt);
	  goto check_clauses;
	}
      else if (!flag_openmp)  /* flag_openmp_simd  */
	{
	  c_parser_skip_to_pragma_eol (parser, false);
	  return false;
	}
      else if (strcmp (p, "data") == 0)
	{
	  c_parser_consume_token (parser);
	  c_parser_omp_target_data (loc, parser, if_p);
	  return true;
	}
      else if (strcmp (p, "enter") == 0)
	{
	  c_parser_consume_token (parser);
	  c_parser_omp_target_enter_data (loc, parser, context);
	  return false;
	}
      else if (strcmp (p, "exit") == 0)
	{
	  c_parser_consume_token (parser);
	  c_parser_omp_target_exit_data (loc, parser, context);
	  return false;
	}
      else if (strcmp (p, "update") == 0)
	{
	  c_parser_consume_token (parser);
	  return c_parser_omp_target_update (loc, parser, context);
	}
    }
  if (!flag_openmp)  /* flag_openmp_simd  */
    {
      c_parser_skip_to_pragma_eol (parser, false);
      return false;
    }
  /* Plain "#pragma omp target" with a structured block.  */
  stmt = make_node (OMP_TARGET);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TARGET_CLAUSES (stmt)
    = c_parser_omp_all_clauses (parser, OMP_TARGET_CLAUSE_MASK,
				"#pragma omp target");
  pc = &OMP_TARGET_CLAUSES (stmt);
  keep_next_level ();
  block = c_begin_compound_stmt (true);
  add_stmt (c_parser_omp_structured_block (parser, if_p));
  OMP_TARGET_BODY (stmt) = c_end_compound_stmt (loc, block, true);
  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);
check_clauses:
  /* Shared tail: diagnose and remove map clauses whose map-kind is not
     valid on target; PC points at the clause chain to scrub.  */
  while (*pc)
    {
      if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_MAP)
	switch (OMP_CLAUSE_MAP_KIND (*pc))
	  {
	  case GOMP_MAP_TO:
	  case GOMP_MAP_ALWAYS_TO:
	  case GOMP_MAP_FROM:
	  case GOMP_MAP_ALWAYS_FROM:
	  case GOMP_MAP_TOFROM:
	  case GOMP_MAP_ALWAYS_TOFROM:
	  case GOMP_MAP_ALLOC:
	  case GOMP_MAP_FIRSTPRIVATE_POINTER:
	  case GOMP_MAP_ALWAYS_POINTER:
	    break;
	  default:
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "%<#pragma omp target%> with map-type other "
		      "than %<to%>, %<from%>, %<tofrom%> or %<alloc%> "
		      "on %<map%> clause");
	    /* Unlink the offending clause; PC is not advanced.  */
	    *pc = OMP_CLAUSE_CHAIN (*pc);
	    continue;
	  }
      pc = &OMP_CLAUSE_CHAIN (*pc);
    }
  return true;
}
/* OpenMP 4.0:
# pragma omp declare simd declare-simd-clauses[optseq] new-line */
#define OMP_DECLARE_SIMD_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SIMDLEN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALIGNED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNIFORM) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_INBRANCH) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOTINBRANCH))
/* Parse "#pragma omp declare simd".  The clause tokens cannot be
   processed until the following function declaration has been parsed,
   so they are buffered here verbatim and later replayed by
   c_finish_omp_declare_simd.  Several consecutive declare simd pragmas
   may apply to one function; all of their tokens are accumulated.  */
static void
c_parser_omp_declare_simd (c_parser *parser, enum pragma_context context)
{
  auto_vec<c_token> clauses;
  /* Buffer all tokens up to (but not including) the pragma EOL.  */
  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      c_token *token = c_parser_peek_token (parser);
      if (token->type == CPP_EOF)
	{
	  c_parser_skip_to_pragma_eol (parser);
	  return;
	}
      clauses.safe_push (*token);
      c_parser_consume_token (parser);
    }
  /* Also buffer the CPP_PRAGMA_EOL token as a terminator.  */
  clauses.safe_push (*c_parser_peek_token (parser));
  c_parser_skip_to_pragma_eol (parser);
  /* Any immediately following pragma must be another declare simd;
     its tokens are appended to the same buffer.  */
  while (c_parser_next_token_is (parser, CPP_PRAGMA))
    {
      if (c_parser_peek_token (parser)->pragma_kind
	  != PRAGMA_OMP_DECLARE
	  || c_parser_peek_2nd_token (parser)->type != CPP_NAME
	  || strcmp (IDENTIFIER_POINTER
				(c_parser_peek_2nd_token (parser)->value),
		     "simd") != 0)
	{
	  c_parser_error (parser,
			  "%<#pragma omp declare simd%> must be followed by "
			  "function declaration or definition or another "
			  "%<#pragma omp declare simd%>");
	  return;
	}
      c_parser_consume_pragma (parser);
      while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
	{
	  c_token *token = c_parser_peek_token (parser);
	  if (token->type == CPP_EOF)
	    {
	      c_parser_skip_to_pragma_eol (parser);
	      return;
	    }
	  clauses.safe_push (*token);
	  c_parser_consume_token (parser);
	}
      clauses.safe_push (*c_parser_peek_token (parser));
      c_parser_skip_to_pragma_eol (parser);
    }
  /* Make sure nothing tries to read past the end of the tokens.  */
  c_token eof_token;
  memset (&eof_token, 0, sizeof (eof_token));
  eof_token.type = CPP_EOF;
  clauses.safe_push (eof_token);
  clauses.safe_push (eof_token);
  /* Now parse the declaration/definition the pragma applies to; the
     buffered CLAUSES are handed down so c_finish_omp_declare_simd can
     attach the attribute once the decl is known.  */
  switch (context)
    {
    case pragma_external:
      /* Allow __extension__ prefixes before the declaration.  */
      if (c_parser_next_token_is (parser, CPP_KEYWORD)
	  && c_parser_peek_token (parser)->keyword == RID_EXTENSION)
	{
	  int ext = disable_extension_diagnostics ();
	  do
	    c_parser_consume_token (parser);
	  while (c_parser_next_token_is (parser, CPP_KEYWORD)
		 && c_parser_peek_token (parser)->keyword == RID_EXTENSION);
	  c_parser_declaration_or_fndef (parser, true, true, true, false, true,
					 NULL, clauses);
	  restore_extension_diagnostics (ext);
	}
      else
	c_parser_declaration_or_fndef (parser, true, true, true, false, true,
				       NULL, clauses);
      break;
    case pragma_struct:
    case pragma_param:
    case pragma_stmt:
      c_parser_error (parser, "%<#pragma omp declare simd%> must be followed by "
			      "function declaration or definition");
      break;
    case pragma_compound:
      if (c_parser_next_token_is (parser, CPP_KEYWORD)
	  && c_parser_peek_token (parser)->keyword == RID_EXTENSION)
	{
	  int ext = disable_extension_diagnostics ();
	  do
	    c_parser_consume_token (parser);
	  while (c_parser_next_token_is (parser, CPP_KEYWORD)
		 && c_parser_peek_token (parser)->keyword == RID_EXTENSION);
	  if (c_parser_next_tokens_start_declaration (parser))
	    {
	      c_parser_declaration_or_fndef (parser, true, true, true, true,
					     true, NULL, clauses);
	      restore_extension_diagnostics (ext);
	      break;
	    }
	  restore_extension_diagnostics (ext);
	}
      else if (c_parser_next_tokens_start_declaration (parser))
	{
	  c_parser_declaration_or_fndef (parser, true, true, true, true, true,
					 NULL, clauses);
	  break;
	}
      c_parser_error (parser, "%<#pragma omp declare simd%> must be followed by "
			      "function declaration or definition");
      break;
    default:
      gcc_unreachable ();
    }
}
/* Finalize #pragma omp declare simd clauses after FNDECL has been parsed,
and put that into "omp declare simd" attribute. */
/* Finalize #pragma omp declare simd clauses after FNDECL has been parsed,
   and put that into "omp declare simd" attribute.  PARMS, if non-NULL,
   overrides DECL_ARGUMENTS of FNDECL when clause arguments are mapped to
   parameter indices.  CLAUSES holds the raw pragma tokens buffered by
   c_parser_omp_declare_simd.  */
static void
c_finish_omp_declare_simd (c_parser *parser, tree fndecl, tree parms,
			   vec<c_token> clauses)
{
  /* Normally first token is CPP_NAME "simd".  CPP_EOF there indicates
     error has been reported and CPP_PRAGMA that c_finish_omp_declare_simd
     has already processed the tokens.  */
  if (clauses.exists () && clauses[0].type == CPP_EOF)
    return;
  if (fndecl == NULL_TREE || TREE_CODE (fndecl) != FUNCTION_DECL)
    {
      error ("%<#pragma omp declare simd%> not immediately followed by "
	     "a function declaration or definition");
      /* CLAUSES may be an empty vec here; guard the write so that
	 clauses[0] is not an out-of-bounds access, matching the
	 clauses.exists () checks on the other accesses.  */
      if (clauses.exists ())
	clauses[0].type = CPP_EOF;
      return;
    }
  if (clauses.exists () && clauses[0].type != CPP_NAME)
    {
      error_at (DECL_SOURCE_LOCATION (fndecl),
		"%<#pragma omp declare simd%> not immediately followed by "
		"a single function declaration or definition");
      clauses[0].type = CPP_EOF;
      return;
    }
  if (parms == NULL_TREE)
    parms = DECL_ARGUMENTS (fndecl);
  /* Temporarily redirect the parser at the buffered clause tokens.  */
  unsigned int tokens_avail = parser->tokens_avail;
  gcc_assert (parser->tokens == &parser->tokens_buf[0]);
  parser->tokens = clauses.address ();
  parser->tokens_avail = clauses.length ();
  /* c_parser_omp_declare_simd pushed 2 extra CPP_EOF tokens at the end.  */
  while (parser->tokens_avail > 3)
    {
      c_token *token = c_parser_peek_token (parser);
      gcc_assert (token->type == CPP_NAME
		  && strcmp (IDENTIFIER_POINTER (token->value), "simd") == 0);
      c_parser_consume_token (parser);
      parser->in_pragma = true;
      tree c = NULL_TREE;
      c = c_parser_omp_all_clauses (parser, OMP_DECLARE_SIMD_CLAUSE_MASK,
				    "#pragma omp declare simd");
      c = c_omp_declare_simd_clauses_to_numbers (parms, c);
      if (c != NULL_TREE)
	c = tree_cons (NULL_TREE, c, NULL_TREE);
      /* Chain a fresh "omp declare simd" attribute onto the decl.  */
      c = build_tree_list (get_identifier ("omp declare simd"), c);
      TREE_CHAIN (c) = DECL_ATTRIBUTES (fndecl);
      DECL_ATTRIBUTES (fndecl) = c;
    }
  /* Restore the parser's real token stream.  */
  parser->tokens = &parser->tokens_buf[0];
  parser->tokens_avail = tokens_avail;
  if (clauses.exists ())
    /* Mark the buffered tokens as already processed.  */
    clauses[0].type = CPP_PRAGMA;
}
/* OpenMP 4.0:
# pragma omp declare target new-line
declarations and definitions
# pragma omp end declare target new-line
OpenMP 4.5:
# pragma omp declare target ( extended-list ) new-line
# pragma omp declare target declare-target-clauses[seq] new-line */
#define OMP_DECLARE_TARGET_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TO) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINK))
/* Parse "#pragma omp declare target".  Three forms: with to/link
   clauses, with a parenthesized extended-list (treated as a to
   clause), or bare — the bare form opens a declare-target region that
   a later "#pragma omp end declare target" closes (tracked via the
   current_omp_declare_target_attribute counter).  */
static void
c_parser_omp_declare_target (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree clauses = NULL_TREE;
  if (c_parser_next_token_is (parser, CPP_NAME))
    clauses = c_parser_omp_all_clauses (parser, OMP_DECLARE_TARGET_CLAUSE_MASK,
					"#pragma omp declare target");
  else if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* OpenMP 4.5 extended-list form: treat the list as a to clause.  */
      clauses = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_TO_DECLARE,
					      clauses);
      clauses = c_finish_omp_clauses (clauses, C_ORT_OMP);
      c_parser_skip_to_pragma_eol (parser);
    }
  else
    {
      /* Bare form: open a declare-target region and return.  */
      c_parser_skip_to_pragma_eol (parser);
      current_omp_declare_target_attribute++;
      return;
    }
  if (current_omp_declare_target_attribute)
    error_at (loc, "%<#pragma omp declare target%> with clauses in between "
		   "%<#pragma omp declare target%> without clauses and "
		   "%<#pragma omp end declare target%>");
  /* Attach the appropriate attribute to each listed decl, diagnosing
     decls named in both to and link clauses.  */
  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree t = OMP_CLAUSE_DECL (c), id;
      tree at1 = lookup_attribute ("omp declare target", DECL_ATTRIBUTES (t));
      tree at2 = lookup_attribute ("omp declare target link",
				   DECL_ATTRIBUTES (t));
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINK)
	{
	  id = get_identifier ("omp declare target link");
	  /* After the swap, AT1 is the attribute matching this clause
	     and AT2 the conflicting one.  */
	  std::swap (at1, at2);
	}
      else
	id = get_identifier ("omp declare target");
      if (at2)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "%qD specified both in declare target %<link%> and %<to%>"
		    " clauses", t);
	  continue;
	}
      if (!at1)
	{
	  DECL_ATTRIBUTES (t) = tree_cons (id, NULL_TREE, DECL_ATTRIBUTES (t));
	  /* Only functions and global variables participate in
	     offloading.  */
	  if (TREE_CODE (t) != FUNCTION_DECL && !is_global_var (t))
	    continue;
	  symtab_node *node = symtab_node::get (t);
	  if (node != NULL)
	    {
	      node->offloadable = 1;
	      if (ENABLE_OFFLOADING)
		{
		  g->have_offload = true;
		  if (is_a <varpool_node *> (node))
		    vec_safe_push (offload_vars, t);
		}
	    }
	}
    }
}
/* Parse "#pragma omp end declare target" and close the innermost bare
   declare-target region opened by c_parser_omp_declare_target.  On entry
   the "end" token has already been recognized by the caller; the pragma
   token itself is consumed here.  */
static void
c_parser_omp_end_declare_target (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  /* Expect the literal identifiers "declare" then "target".  */
  if (c_parser_next_token_is (parser, CPP_NAME)
      && strcmp (IDENTIFIER_POINTER (c_parser_peek_token (parser)->value),
		 "declare") == 0)
    {
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME)
	  && strcmp (IDENTIFIER_POINTER (c_parser_peek_token (parser)->value),
		     "target") == 0)
	c_parser_consume_token (parser);
      else
	{
	  c_parser_error (parser, "expected %<target%>");
	  c_parser_skip_to_pragma_eol (parser);
	  return;
	}
    }
  else
    {
      c_parser_error (parser, "expected %<declare%>");
      c_parser_skip_to_pragma_eol (parser);
      return;
    }
  c_parser_skip_to_pragma_eol (parser);
  /* Unmatched "end declare target" is an error; otherwise pop one
     nesting level.  */
  if (!current_omp_declare_target_attribute)
    error_at (loc, "%<#pragma omp end declare target%> without corresponding "
		   "%<#pragma omp declare target%>");
  else
    current_omp_declare_target_attribute--;
}
/* OpenMP 4.0
#pragma omp declare reduction (reduction-id : typename-list : expression) \
initializer-clause[opt] new-line
initializer-clause:
initializer (omp_priv = initializer)
initializer (function-name (argument-list)) */
/* Parse "#pragma omp declare reduction (reduction-id : typename-list :
   expression) initializer-clause[opt]".  For each type in the list a tiny
   artificial FUNCTION_DECL scope is created so that the magic variables
   omp_out/omp_in (and omp_priv/omp_orig for the initializer) can be
   declared and the combiner/initializer expressions parsed against them.
   The results are recorded on DECL_INITIAL of the reduction decl as a
   TREE_LIST of (type, TREE_VEC) pairs.  When more than one type is given,
   the pragma's tokens are recorded into CLAUSES and replayed once per
   type by temporarily repointing parser->tokens at the saved vector.  */
static void
c_parser_omp_declare_reduction (c_parser *parser, enum pragma_context context)
{
  unsigned int tokens_avail = 0, i;
  vec<tree> types = vNULL;
  /* Saved token stream, replayed once per declared type.  */
  vec<c_token> clauses = vNULL;
  enum tree_code reduc_code = ERROR_MARK;
  tree reduc_id = NULL_TREE;
  tree type;
  location_t rloc = c_parser_peek_token (parser)->location;
  /* Only valid at file or block scope.  */
  if (context == pragma_struct || context == pragma_param)
    {
      error ("%<#pragma omp declare reduction%> not at file or block scope");
      goto fail;
    }
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    goto fail;
  /* The reduction-id is either one of the built-in operators or an
     identifier (with "min"/"max" mapped to MIN_EXPR/MAX_EXPR).  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_PLUS:
      reduc_code = PLUS_EXPR;
      break;
    case CPP_MULT:
      reduc_code = MULT_EXPR;
      break;
    case CPP_MINUS:
      reduc_code = MINUS_EXPR;
      break;
    case CPP_AND:
      reduc_code = BIT_AND_EXPR;
      break;
    case CPP_XOR:
      reduc_code = BIT_XOR_EXPR;
      break;
    case CPP_OR:
      reduc_code = BIT_IOR_EXPR;
      break;
    case CPP_AND_AND:
      reduc_code = TRUTH_ANDIF_EXPR;
      break;
    case CPP_OR_OR:
      reduc_code = TRUTH_ORIF_EXPR;
      break;
    case CPP_NAME:
      const char *p;
      p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "min") == 0)
	{
	  reduc_code = MIN_EXPR;
	  break;
	}
      if (strcmp (p, "max") == 0)
	{
	  reduc_code = MAX_EXPR;
	  break;
	}
      reduc_id = c_parser_peek_token (parser)->value;
      break;
    default:
      c_parser_error (parser,
		      "expected %<+%>, %<*%>, %<-%>, %<&%>, "
		      "%<^%>, %<|%>, %<&&%>, %<||%> or identifier");
      goto fail;
    }
  tree orig_reduc_id, reduc_decl;
  /* ORIG_REDUC_ID stays NULL for operator reductions; used below to
     reject redeclaring the predeclared arithmetic reductions.  */
  orig_reduc_id = reduc_id;
  reduc_id = c_omp_reduction_id (reduc_code, reduc_id);
  reduc_decl = c_omp_reduction_decl (reduc_id);
  c_parser_consume_token (parser);
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
    goto fail;
  /* Parse the comma-separated typename-list, validating each type.  */
  while (true)
    {
      location_t loc = c_parser_peek_token (parser)->location;
      struct c_type_name *ctype = c_parser_type_name (parser);
      if (ctype != NULL)
	{
	  type = groktypename (ctype, NULL, NULL);
	  if (type == error_mark_node)
	    ;
	  else if ((INTEGRAL_TYPE_P (type)
		    || TREE_CODE (type) == REAL_TYPE
		    || TREE_CODE (type) == COMPLEX_TYPE)
		   && orig_reduc_id == NULL_TREE)
	    error_at (loc, "predeclared arithmetic type in "
			   "%<#pragma omp declare reduction%>");
	  else if (TREE_CODE (type) == FUNCTION_TYPE
		   || TREE_CODE (type) == ARRAY_TYPE)
	    error_at (loc, "function or array type in "
			   "%<#pragma omp declare reduction%>");
	  else if (TYPE_ATOMIC (type))
	    error_at (loc, "%<_Atomic%> qualified type in "
			   "%<#pragma omp declare reduction%>");
	  else if (TYPE_QUALS_NO_ADDR_SPACE (type))
	    error_at (loc, "const, volatile or restrict qualified type in "
			   "%<#pragma omp declare reduction%>");
	  else
	    {
	      /* Reject a second declaration for a type that already has
		 one registered on REDUC_DECL.  */
	      tree t;
	      for (t = DECL_INITIAL (reduc_decl); t; t = TREE_CHAIN (t))
		if (comptypes (TREE_PURPOSE (t), type))
		  {
		    error_at (loc, "redeclaration of %qs "
				   "%<#pragma omp declare reduction%> for "
				   "type %qT",
			      IDENTIFIER_POINTER (reduc_id)
			      + sizeof ("omp declare reduction ") - 1,
			      type);
		    location_t ploc
		      = DECL_SOURCE_LOCATION (TREE_VEC_ELT (TREE_VALUE (t),
							    0));
		    error_at (ploc, "previous %<#pragma omp declare "
				    "reduction%>");
		    break;
		  }
	      if (t == NULL_TREE)
		types.safe_push (type);
	    }
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	}
      else
	break;
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")
      || types.is_empty ())
    {
      /* Common error exit: drop everything and skip to the pragma end.  */
     fail:
      clauses.release ();
      types.release ();
      while (true)
	{
	  c_token *token = c_parser_peek_token (parser);
	  if (token->type == CPP_EOF || token->type == CPP_PRAGMA_EOL)
	    break;
	  c_parser_consume_token (parser);
	}
      c_parser_skip_to_pragma_eol (parser);
      return;
    }
  if (types.length () > 1)
    {
      /* Multiple types: snapshot the remaining pragma tokens so the
	 combiner/initializer can be re-parsed once per type.  */
      while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
	{
	  c_token *token = c_parser_peek_token (parser);
	  if (token->type == CPP_EOF)
	    goto fail;
	  clauses.safe_push (*token);
	  c_parser_consume_token (parser);
	}
      clauses.safe_push (*c_parser_peek_token (parser));
      c_parser_skip_to_pragma_eol (parser);
      /* Make sure nothing tries to read past the end of the tokens.  */
      c_token eof_token;
      memset (&eof_token, 0, sizeof (eof_token));
      eof_token.type = CPP_EOF;
      clauses.safe_push (eof_token);
      clauses.safe_push (eof_token);
    }
  int errs = errorcount;
  FOR_EACH_VEC_ELT (types, i, type)
    {
      /* Save the real token buffer state so it can be restored after
	 replaying the recorded tokens.  */
      tokens_avail = parser->tokens_avail;
      gcc_assert (parser->tokens == &parser->tokens_buf[0]);
      if (!clauses.is_empty ())
	{
	  parser->tokens = clauses.address ();
	  parser->tokens_avail = clauses.length ();
	  parser->in_pragma = true;
	}
      /* Build an artificial function scope to host omp_out/omp_in.  */
      bool nested = current_function_decl != NULL_TREE;
      if (nested)
	c_push_function_context ();
      tree fndecl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
				reduc_id, default_function_type);
      current_function_decl = fndecl;
      allocate_struct_function (fndecl, true);
      push_scope ();
      tree stmt = push_stmt_list ();
      /* Intentionally BUILTINS_LOCATION, so that -Wshadow doesn't
	 warn about these.  */
      tree omp_out = build_decl (BUILTINS_LOCATION, VAR_DECL,
				 get_identifier ("omp_out"), type);
      DECL_ARTIFICIAL (omp_out) = 1;
      DECL_CONTEXT (omp_out) = fndecl;
      pushdecl (omp_out);
      tree omp_in = build_decl (BUILTINS_LOCATION, VAR_DECL,
				get_identifier ("omp_in"), type);
      DECL_ARTIFICIAL (omp_in) = 1;
      DECL_CONTEXT (omp_in) = fndecl;
      pushdecl (omp_in);
      /* The combiner expression, parsed with omp_out/omp_in in scope.  */
      struct c_expr combiner = c_parser_expression (parser);
      struct c_expr initializer;
      tree omp_priv = NULL_TREE, omp_orig = NULL_TREE;
      bool bad = false;
      initializer.set_error ();
      if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	bad = true;
      else if (c_parser_next_token_is (parser, CPP_NAME)
	       && strcmp (IDENTIFIER_POINTER
			  (c_parser_peek_token (parser)->value),
			  "initializer") == 0)
	{
	  /* Optional initializer-clause.  Re-open the scope so that
	     omp_out/omp_in are replaced by omp_priv/omp_orig.  */
	  c_parser_consume_token (parser);
	  pop_scope ();
	  push_scope ();
	  omp_priv = build_decl (BUILTINS_LOCATION, VAR_DECL,
				 get_identifier ("omp_priv"), type);
	  DECL_ARTIFICIAL (omp_priv) = 1;
	  DECL_INITIAL (omp_priv) = error_mark_node;
	  DECL_CONTEXT (omp_priv) = fndecl;
	  pushdecl (omp_priv);
	  omp_orig = build_decl (BUILTINS_LOCATION, VAR_DECL,
				 get_identifier ("omp_orig"), type);
	  DECL_ARTIFICIAL (omp_orig) = 1;
	  DECL_CONTEXT (omp_orig) = fndecl;
	  pushdecl (omp_orig);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    bad = true;
	  else if (!c_parser_next_token_is (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected %<omp_priv%> or "
				      "function-name");
	      bad = true;
	    }
	  else if (strcmp (IDENTIFIER_POINTER
			   (c_parser_peek_token (parser)->value),
			   "omp_priv") != 0)
	    {
	      /* initializer (function-name (argument-list)) form; one of
		 the call's arguments must be &omp_priv.  */
	      if (c_parser_peek_2nd_token (parser)->type != CPP_OPEN_PAREN
		  || c_parser_peek_token (parser)->id_kind != C_ID_ID)
		{
		  c_parser_error (parser, "expected function-name %<(%>");
		  bad = true;
		}
	      else
		initializer = c_parser_postfix_expression (parser);
	      if (initializer.value
		  && TREE_CODE (initializer.value) == CALL_EXPR)
		{
		  int j;
		  tree c = initializer.value;
		  for (j = 0; j < call_expr_nargs (c); j++)
		    {
		      tree a = CALL_EXPR_ARG (c, j);
		      STRIP_NOPS (a);
		      if (TREE_CODE (a) == ADDR_EXPR
			  && TREE_OPERAND (a, 0) == omp_priv)
			break;
		    }
		  if (j == call_expr_nargs (c))
		    error ("one of the initializer call arguments should be "
			   "%<&omp_priv%>");
		}
	    }
	  else
	    {
	      /* initializer (omp_priv = initializer) form.  */
	      c_parser_consume_token (parser);
	      if (!c_parser_require (parser, CPP_EQ, "expected %<=%>"))
		bad = true;
	      else
		{
		  tree st = push_stmt_list ();
		  location_t loc = c_parser_peek_token (parser)->location;
		  rich_location richloc (line_table, loc);
		  start_init (omp_priv, NULL_TREE, 0, &richloc);
		  struct c_expr init = c_parser_initializer (parser);
		  finish_init ();
		  finish_decl (omp_priv, loc, init.value,
			       init.original_type, NULL_TREE);
		  pop_stmt_list (st);
		}
	    }
	  if (!bad
	      && !c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	    bad = true;
	}
      if (!bad)
	{
	  c_parser_skip_to_pragma_eol (parser);
	  /* Record (type -> [omp_out, omp_in, combiner, omp_priv,
	     omp_orig, initializer]) on the reduction decl.  */
	  tree t = tree_cons (type, make_tree_vec (omp_priv ? 6 : 3),
			      DECL_INITIAL (reduc_decl));
	  DECL_INITIAL (reduc_decl) = t;
	  DECL_SOURCE_LOCATION (omp_out) = rloc;
	  TREE_VEC_ELT (TREE_VALUE (t), 0) = omp_out;
	  TREE_VEC_ELT (TREE_VALUE (t), 1) = omp_in;
	  TREE_VEC_ELT (TREE_VALUE (t), 2) = combiner.value;
	  walk_tree (&combiner.value, c_check_omp_declare_reduction_r,
		     &TREE_VEC_ELT (TREE_VALUE (t), 0), NULL);
	  if (omp_priv)
	    {
	      DECL_SOURCE_LOCATION (omp_priv) = rloc;
	      TREE_VEC_ELT (TREE_VALUE (t), 3) = omp_priv;
	      TREE_VEC_ELT (TREE_VALUE (t), 4) = omp_orig;
	      TREE_VEC_ELT (TREE_VALUE (t), 5) = initializer.value;
	      walk_tree (&initializer.value, c_check_omp_declare_reduction_r,
			 &TREE_VEC_ELT (TREE_VALUE (t), 3), NULL);
	      walk_tree (&DECL_INITIAL (omp_priv),
			 c_check_omp_declare_reduction_r,
			 &TREE_VEC_ELT (TREE_VALUE (t), 3), NULL);
	    }
	}
      /* Tear down the artificial function scope.  */
      pop_stmt_list (stmt);
      pop_scope ();
      if (cfun->language != NULL)
	{
	  ggc_free (cfun->language);
	  cfun->language = NULL;
	}
      set_cfun (NULL);
      current_function_decl = NULL_TREE;
      if (nested)
	c_pop_function_context ();
      if (!clauses.is_empty ())
	{
	  /* Restore the real token buffer after replaying CLAUSES.  */
	  parser->tokens = &parser->tokens_buf[0];
	  parser->tokens_avail = tokens_avail;
	}
      if (bad)
	goto fail;
      if (errs != errorcount)
	break;
    }
  clauses.release ();
  types.release ();
}
/* OpenMP 4.0
#pragma omp declare simd declare-simd-clauses[optseq] new-line
#pragma omp declare reduction (reduction-id : typename-list : expression) \
initializer-clause[opt] new-line
#pragma omp declare target new-line */
/* Dispatch "#pragma omp declare ..." to the simd / reduction / target
   sub-parsers based on the identifier that follows "declare".  Under
   -fopenmp-simd (without -fopenmp), only the simd and reduction forms
   are honored; "declare target" is silently skipped.  */
static void
c_parser_omp_declare (c_parser *parser, enum pragma_context context)
{
  c_parser_consume_pragma (parser);
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *kind
	= IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (!strcmp (kind, "simd"))
	{
	  /* The "simd" token is consumed inside
	     c_parser_omp_declare_simd.  */
	  c_parser_omp_declare_simd (parser, context);
	  return;
	}
      if (!strcmp (kind, "reduction"))
	{
	  c_parser_consume_token (parser);
	  c_parser_omp_declare_reduction (parser, context);
	  return;
	}
      if (!flag_openmp)  /* flag_openmp_simd  */
	{
	  c_parser_skip_to_pragma_eol (parser, false);
	  return;
	}
      if (!strcmp (kind, "target"))
	{
	  c_parser_consume_token (parser);
	  c_parser_omp_declare_target (parser);
	  return;
	}
    }
  c_parser_error (parser, "expected %<simd%> or %<reduction%> "
			  "or %<target%>");
  c_parser_skip_to_pragma_eol (parser);
}
/* OpenMP 4.5:
#pragma omp taskloop taskloop-clause[optseq] new-line
for-loop
#pragma omp taskloop simd taskloop-simd-clause[optseq] new-line
for-loop */
#define OMP_TASKLOOP_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_GRAINSIZE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TASKS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNTIED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FINAL) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY))
/* OpenMP 4.5:
   #pragma omp taskloop taskloop-clause[optseq] new-line
     for-loop
   #pragma omp taskloop simd taskloop-simd-clause[optseq] new-line
     for-loop
   Parse a taskloop (optionally combined with simd).  P_NAME accumulates
   the combined-construct name for diagnostics, MASK the allowed clauses;
   CCLAUSES is non-NULL when this is itself part of a larger combined
   construct.  */
static tree
c_parser_omp_taskloop (location_t loc, c_parser *parser,
		       char *p_name, omp_clause_mask mask, tree *cclauses,
		       bool *if_p)
{
  tree clauses, block, ret;
  strcat (p_name, " taskloop");
  mask |= OMP_TASKLOOP_CLAUSE_MASK;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "simd") == 0)
	{
	  /* "taskloop simd": parse the inner simd, then wrap its body in
	     an OMP_TASKLOOP node carrying the taskloop-half of the split
	     clauses.  */
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;
	  mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION);
	  c_parser_consume_token (parser);
	  if (!flag_openmp)  /* flag_openmp_simd  */
	    return c_parser_omp_simd (loc, parser, p_name, mask, cclauses,
				      if_p);
	  block = c_begin_compound_stmt (true);
	  ret = c_parser_omp_simd (loc, parser, p_name, mask, cclauses, if_p);
	  block = c_end_compound_stmt (loc, block, true);
	  if (ret == NULL)
	    return ret;
	  ret = make_node (OMP_TASKLOOP);
	  TREE_TYPE (ret) = void_type_node;
	  OMP_FOR_BODY (ret) = block;
	  OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
	  SET_EXPR_LOCATION (ret, loc);
	  add_stmt (ret);
	  return ret;
	}
    }
  /* Plain "taskloop" is ignored under -fopenmp-simd.  */
  if (!flag_openmp)  /* flag_openmp_simd  */
    {
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }
  clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_TASKLOOP, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
    }
  block = c_begin_compound_stmt (true);
  ret = c_parser_omp_for_loop (loc, parser, OMP_TASKLOOP, clauses, NULL, if_p);
  block = c_end_compound_stmt (loc, block, true);
  add_stmt (block);
  return ret;
}
/* Main entry point to parsing most OpenMP pragmas. */
/* Main entry point to parsing most OpenMP (and some OpenACC) pragmas:
   dispatch on the pragma kind already attached to the current token.
   P_NAME is sized for the longest combined-construct name and is filled
   in by the individual sub-parsers for diagnostics.  */
static void
c_parser_omp_construct (c_parser *parser, bool *if_p)
{
  enum pragma_kind p_kind;
  location_t loc;
  tree stmt;
  char p_name[sizeof "#pragma omp teams distribute parallel for simd"];
  omp_clause_mask mask (0);
  loc = c_parser_peek_token (parser)->location;
  p_kind = c_parser_peek_token (parser)->pragma_kind;
  c_parser_consume_pragma (parser);
  switch (p_kind)
    {
    case PRAGMA_OACC_ATOMIC:
      /* atomic adds its own statement; no STMT to location-check below.  */
      c_parser_omp_atomic (loc, parser);
      return;
    case PRAGMA_OACC_CACHE:
      strcpy (p_name, "#pragma acc");
      stmt = c_parser_oacc_cache (loc, parser);
      break;
    case PRAGMA_OACC_DATA:
      stmt = c_parser_oacc_data (loc, parser, if_p);
      break;
    case PRAGMA_OACC_HOST_DATA:
      stmt = c_parser_oacc_host_data (loc, parser, if_p);
      break;
    case PRAGMA_OACC_KERNELS:
    case PRAGMA_OACC_PARALLEL:
      strcpy (p_name, "#pragma acc");
      stmt = c_parser_oacc_kernels_parallel (loc, parser, p_kind, p_name,
					     if_p);
      break;
    case PRAGMA_OACC_LOOP:
      strcpy (p_name, "#pragma acc");
      stmt = c_parser_oacc_loop (loc, parser, p_name, mask, NULL, if_p);
      break;
    case PRAGMA_OACC_WAIT:
      strcpy (p_name, "#pragma wait");
      stmt = c_parser_oacc_wait (loc, parser, p_name);
      break;
    case PRAGMA_OMP_ATOMIC:
      c_parser_omp_atomic (loc, parser);
      return;
    case PRAGMA_OMP_CRITICAL:
      stmt = c_parser_omp_critical (loc, parser, if_p);
      break;
    case PRAGMA_OMP_DISTRIBUTE:
      strcpy (p_name, "#pragma omp");
      stmt = c_parser_omp_distribute (loc, parser, p_name, mask, NULL, if_p);
      break;
    case PRAGMA_OMP_FOR:
      strcpy (p_name, "#pragma omp");
      stmt = c_parser_omp_for (loc, parser, p_name, mask, NULL, if_p);
      break;
    case PRAGMA_OMP_MASTER:
      stmt = c_parser_omp_master (loc, parser, if_p);
      break;
    case PRAGMA_OMP_PARALLEL:
      strcpy (p_name, "#pragma omp");
      stmt = c_parser_omp_parallel (loc, parser, p_name, mask, NULL, if_p);
      break;
    case PRAGMA_OMP_SECTIONS:
      strcpy (p_name, "#pragma omp");
      stmt = c_parser_omp_sections (loc, parser, p_name, mask, NULL);
      break;
    case PRAGMA_OMP_SIMD:
      strcpy (p_name, "#pragma omp");
      stmt = c_parser_omp_simd (loc, parser, p_name, mask, NULL, if_p);
      break;
    case PRAGMA_OMP_SINGLE:
      stmt = c_parser_omp_single (loc, parser, if_p);
      break;
    case PRAGMA_OMP_TASK:
      stmt = c_parser_omp_task (loc, parser, if_p);
      break;
    case PRAGMA_OMP_TASKGROUP:
      stmt = c_parser_omp_taskgroup (parser, if_p);
      break;
    case PRAGMA_OMP_TASKLOOP:
      strcpy (p_name, "#pragma omp");
      stmt = c_parser_omp_taskloop (loc, parser, p_name, mask, NULL, if_p);
      break;
    case PRAGMA_OMP_TEAMS:
      strcpy (p_name, "#pragma omp");
      stmt = c_parser_omp_teams (loc, parser, p_name, mask, NULL, if_p);
      break;
    default:
      gcc_unreachable ();
    }
  /* Every constructed statement must carry a usable location.  */
  if (stmt)
    gcc_assert (EXPR_LOCATION (stmt) != UNKNOWN_LOCATION);
}
/* OpenMP 2.5:
# pragma omp threadprivate (variable-list) */
/* OpenMP 2.5:
   # pragma omp threadprivate (variable-list)
   Parse the directive and mark every listed variable for thread-local
   storage, diagnosing non-variables, already-used variables, automatics
   and incomplete types.  */
static void
c_parser_omp_threadprivate (c_parser *parser)
{
  tree vars, t;
  location_t loc;
  c_parser_consume_pragma (parser);
  loc = c_parser_peek_token (parser)->location;
  vars = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL);
  /* Mark every variable in VARS to be assigned thread local storage.  */
  for (t = vars; t; t = TREE_CHAIN (t))
    {
      tree v = TREE_PURPOSE (t);
      /* FIXME diagnostics: Ideally we should keep individual
	 locations for all the variables in the var list to make the
	 following errors more precise.  Perhaps
	 c_parser_omp_var_list_parens() should construct a list of
	 locations to go along with the var list.  */
      /* If V had already been marked threadprivate, it doesn't matter
	 whether it had been used prior to this point.  */
      if (!VAR_P (v))
	error_at (loc, "%qD is not a variable", v);
      else if (TREE_USED (v) && !C_DECL_THREADPRIVATE_P (v))
	error_at (loc, "%qE declared %<threadprivate%> after first use", v);
      else if (! is_global_var (v))
	error_at (loc, "automatic variable %qE cannot be %<threadprivate%>", v);
      else if (TREE_TYPE (v) == error_mark_node)
	;
      else if (! COMPLETE_TYPE_P (TREE_TYPE (v)))
	error_at (loc, "%<threadprivate%> %qE has incomplete type", v);
      else
	{
	  if (! DECL_THREAD_LOCAL_P (v))
	    {
	      set_decl_tls_model (v, decl_default_tls_model (v));
	      /* If rtl has been already set for this var, call
		 make_decl_rtl once again, so that encode_section_info
		 has a chance to look at the new decl flags.  */
	      if (DECL_RTL_SET_P (v))
		make_decl_rtl (v);
	    }
	  C_DECL_THREADPRIVATE_P (v) = 1;
	}
    }
  c_parser_skip_to_pragma_eol (parser);
}
/* Parse a transaction attribute (GCC Extension).
transaction-attribute:
attributes
[ [ any-word ] ]
The transactional memory language description is written for C++,
and uses the C++0x attribute syntax. For compatibility, allow the
bracket style for transactions in C as well. */
/* Parse a transaction attribute (GCC Extension): either a regular
   __attribute__ list, or the C++0x-compatible "[[ any-word ]]" bracket
   form.  Returns the attribute tree list, or NULL_TREE if none.  */
static tree
c_parser_transaction_attributes (c_parser *parser)
{
  tree attr_name, attr = NULL;
  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    return c_parser_attributes (parser);
  if (!c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
    return NULL_TREE;
  /* Bracket form: consume the first '[', require the second.  */
  c_parser_consume_token (parser);
  if (!c_parser_require (parser, CPP_OPEN_SQUARE, "expected %<[%>"))
    goto error1;
  attr_name = c_parser_attribute_any_word (parser);
  if (attr_name)
    {
      c_parser_consume_token (parser);
      attr = build_tree_list (attr_name, NULL_TREE);
    }
  else
    c_parser_error (parser, "expected identifier");
  /* First skip closes the inner ']'; falling through to error1 then
     closes the outer ']' — both paths need both brackets consumed.  */
  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>");
 error1:
  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>");
  return attr;
}
/* Parse a __transaction_atomic or __transaction_relaxed statement
(GCC Extension).
transaction-statement:
__transaction_atomic transaction-attribute[opt] compound-statement
__transaction_relaxed compound-statement
Note that the only valid attribute is: "outer".
*/
/* Parse a __transaction_atomic or __transaction_relaxed statement
   (GCC Extension):
     __transaction_atomic transaction-attribute[opt] compound-statement
     __transaction_relaxed compound-statement
   The only valid attribute is "outer".  Tracks transaction nesting via
   parser->in_transaction.  */
static tree
c_parser_transaction (c_parser *parser, enum rid keyword)
{
  unsigned int old_in = parser->in_transaction;
  unsigned int this_in = 1, new_in;
  location_t loc = c_parser_peek_token (parser)->location;
  tree stmt, attrs;
  gcc_assert ((keyword == RID_TRANSACTION_ATOMIC
	       || keyword == RID_TRANSACTION_RELAXED)
	      && c_parser_next_token_is_keyword (parser, keyword));
  c_parser_consume_token (parser);
  if (keyword == RID_TRANSACTION_RELAXED)
    this_in |= TM_STMT_ATTR_RELAXED;
  else
    {
      /* Only __transaction_atomic may carry attributes ("outer").  */
      attrs = c_parser_transaction_attributes (parser);
      if (attrs)
	this_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
    }
  /* Keep track if we're in the lexical scope of an outer transaction.  */
  new_in = this_in | (old_in & TM_STMT_ATTR_OUTER);
  parser->in_transaction = new_in;
  stmt = c_parser_compound_statement (parser);
  parser->in_transaction = old_in;
  /* Without -fgnu-tm the body is still parsed, then diagnosed.  */
  if (flag_tm)
    stmt = c_finish_transaction (loc, stmt, this_in);
  else
    error_at (loc, (keyword == RID_TRANSACTION_ATOMIC ?
	"%<__transaction_atomic%> without transactional memory support enabled"
	: "%<__transaction_relaxed %> "
	"without transactional memory support enabled"));
  return stmt;
}
/* Parse a __transaction_atomic or __transaction_relaxed expression
(GCC Extension).
transaction-expression:
__transaction_atomic ( expression )
__transaction_relaxed ( expression )
*/
/* Parse a __transaction_atomic or __transaction_relaxed expression
   (GCC Extension):
     __transaction_atomic ( expression )
     __transaction_relaxed ( expression )
   Returns the wrapped TRANSACTION_EXPR, or an error expression.  */
static struct c_expr
c_parser_transaction_expression (c_parser *parser, enum rid keyword)
{
  struct c_expr ret;
  unsigned int old_in = parser->in_transaction;
  unsigned int this_in = 1;
  location_t loc = c_parser_peek_token (parser)->location;
  tree attrs;
  gcc_assert ((keyword == RID_TRANSACTION_ATOMIC
	       || keyword == RID_TRANSACTION_RELAXED)
	      && c_parser_next_token_is_keyword (parser, keyword));
  c_parser_consume_token (parser);
  if (keyword == RID_TRANSACTION_RELAXED)
    this_in |= TM_STMT_ATTR_RELAXED;
  else
    {
      /* No attribute bits are valid on the expression form; mask 0.  */
      attrs = c_parser_transaction_attributes (parser);
      if (attrs)
	this_in |= parse_tm_stmt_attr (attrs, 0);
    }
  parser->in_transaction = this_in;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      tree expr = c_parser_expression (parser).value;
      ret.original_type = TREE_TYPE (expr);
      ret.value = build1 (TRANSACTION_EXPR, ret.original_type, expr);
      if (this_in & TM_STMT_ATTR_RELAXED)
	TRANSACTION_EXPR_RELAXED (ret.value) = 1;
      SET_EXPR_LOCATION (ret.value, loc);
      ret.original_code = TRANSACTION_EXPR;
      if (!parens.require_close (parser))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  goto error;
	}
    }
  else
    {
     error:
      ret.set_error ();
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
    }
  parser->in_transaction = old_in;
  if (!flag_tm)
    error_at (loc, (keyword == RID_TRANSACTION_ATOMIC ?
	"%<__transaction_atomic%> without transactional memory support enabled"
	: "%<__transaction_relaxed %> "
	"without transactional memory support enabled"));
  set_c_expr_source_range (&ret, loc, loc);
  return ret;
}
/* Parse a __transaction_cancel statement (GCC Extension).
transaction-cancel-statement:
__transaction_cancel transaction-attribute[opt] ;
Note that the only valid attribute is "outer".
*/
/* Parse a __transaction_cancel statement (GCC Extension):
     __transaction_cancel transaction-attribute[opt] ;
   The only valid attribute is "outer".  Valid only inside a
   __transaction_atomic (or, for "outer", a may-cancel-outer function).  */
static tree
c_parser_transaction_cancel (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree attrs;
  bool is_outer = false;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_TRANSACTION_CANCEL));
  c_parser_consume_token (parser);
  attrs = c_parser_transaction_attributes (parser);
  if (attrs)
    is_outer = (parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER) != 0);
  if (!flag_tm)
    {
      error_at (loc, "%<__transaction_cancel%> without "
		     "transactional memory support enabled");
      goto ret_error;
    }
  else if (parser->in_transaction & TM_STMT_ATTR_RELAXED)
    {
      /* Cancel is not permitted inside a relaxed transaction.  */
      error_at (loc, "%<__transaction_cancel%> within a "
		     "%<__transaction_relaxed%>");
      goto ret_error;
    }
  else if (is_outer)
    {
      if ((parser->in_transaction & TM_STMT_ATTR_OUTER) == 0
	  && !is_tm_may_cancel_outer (current_function_decl))
	{
	  error_at (loc, "outer %<__transaction_cancel%> not "
			 "within outer %<__transaction_atomic%>");
	  error_at (loc, "  or a %<transaction_may_cancel_outer%> function");
	  goto ret_error;
	}
    }
  else if (parser->in_transaction == 0)
    {
      error_at (loc, "%<__transaction_cancel%> not within "
		     "%<__transaction_atomic%>");
      goto ret_error;
    }
  return add_stmt (build_tm_abort_call (loc, is_outer));
 ret_error:
  /* Return a harmless placeholder so callers see a valid statement.  */
  return build1 (NOP_EXPR, void_type_node, error_mark_node);
}
/* Parse a single source file. */
/* Parse a single source file: the top-level driver for the C parser.  */
void
c_parse_file (void)
{
  /* Use local storage to begin.  If the first token is a pragma, parse it.
     If it is #pragma GCC pch_preprocess, then this will load a PCH file
     which will cause garbage collection.  */
  c_parser tparser;
  memset (&tparser, 0, sizeof tparser);
  tparser.tokens = &tparser.tokens_buf[0];
  the_parser = &tparser;
  if (c_parser_peek_token (&tparser)->pragma_kind == PRAGMA_GCC_PCH_PREPROCESS)
    c_parser_pragma_pch_preprocess (&tparser);
  /* Move to GC-managed storage so the parser survives collections;
     re-aim the tokens pointer if it still pointed into the local copy.  */
  the_parser = ggc_alloc<c_parser> ();
  *the_parser = tparser;
  if (tparser.tokens == &tparser.tokens_buf[0])
    the_parser->tokens = &the_parser->tokens_buf[0];
  /* Initialize EH, if we've been told to do so.  */
  if (flag_exceptions)
    using_eh_for_cleanups ();
  c_parser_translation_unit (the_parser);
  the_parser = NULL;
}
/* Parse the body of a function declaration marked with "__RTL".
The RTL parser works on the level of characters read from a
FILE *, whereas c_parser works at the level of tokens.
Square this circle by consuming all of the tokens up to and
including the closing brace, recording the start/end of the RTL
fragment, and reopening the file and re-reading the relevant
lines within the RTL parser.
This requires the opening and closing braces of the C function
to be on separate lines from the RTL they wrap.
Take ownership of START_WITH_PASS, if non-NULL. */
/* Parse the body of a function declaration marked with "__RTL".
   The RTL parser works on the level of characters read from a
   FILE *, whereas c_parser works at the level of tokens.
   Square this circle by consuming all of the tokens up to and
   including the closing brace, recording the start/end of the RTL
   fragment, and reopening the file and re-reading the relevant
   lines within the RTL parser.
   This requires the opening and closing braces of the C function
   to be on separate lines from the RTL they wrap.
   Take ownership of START_WITH_PASS, if non-NULL.  */
void
c_parser_parse_rtl_body (c_parser *parser, char *start_with_pass)
{
  if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
    {
      free (start_with_pass);
      return;
    }
  location_t start_loc = c_parser_peek_token (parser)->location;
  /* Consume all tokens, up to the closing brace, handling
     matching pairs of braces in the rtl dump.  */
  int num_open_braces = 1;
  while (1)
    {
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_OPEN_BRACE:
	  num_open_braces++;
	  break;
	case CPP_CLOSE_BRACE:
	  if (--num_open_braces == 0)
	    goto found_closing_brace;
	  break;
	case CPP_EOF:
	  error_at (start_loc, "no closing brace");
	  free (start_with_pass);
	  return;
	default:
	  break;
	}
      c_parser_consume_token (parser);
    }
 found_closing_brace:
  /* At the closing brace; record its location.  */
  location_t end_loc = c_parser_peek_token (parser)->location;
  /* Consume the closing brace.  */
  c_parser_consume_token (parser);
  /* Invoke the RTL parser.  */
  if (!read_rtl_function_body_from_file_range (start_loc, end_loc))
    {
      free (start_with_pass);
      return;
    }
  /* If a pass name was provided for START_WITH_PASS, run the backend
     accordingly now, on the cfun created above, transferring
     ownership of START_WITH_PASS.  */
  if (start_with_pass)
    run_rtl_passes (start_with_pass);
}
#include "gt-c-c-parser.h"
|
sync.c | /*
* Copyright (c) 2009, 2010, 2011, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
*/
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include <omp.h>
#include <arch/x86/barrelfish_kpi/asm_inlines_arch.h>
#define GANG_SCHEDULING
#undef MEASURE_SYNC
#define MEASURE
#define WORK_PERIOD 5000000000UL
#define STACK_SIZE (64 * 1024)
/* Barrier-synchronization microbenchmark.
 *
 * With no arguments: calibrate, doubling the iteration count until the
 * barrier loop runs for at least WORK_PERIOD cycles, then print it.
 * With one argument: run the benchmark with that iteration count.
 * With two arguments: additionally force the thread count (Barrelfish
 * bomp path; currently disabled via assert).
 *
 * Fixes vs. the previous version:
 *  - the MEASURE timestamp arrays were indexed with
 *    `i < WORKMAX ? i : WORKMAX`, writing one element past the end of
 *    each [WORKMAX] array once i reaches WORKMAX; clamp to WORKMAX - 1.
 *  - `workcnt` was incremented while uninitialized; start it at 0.
 *  - the min-trackers were seeded with a raw timestamp instead of a
 *    proper sentinel; use UINT64_MAX.
 */
int main(int argc, char *argv[])
{
    uint64_t now, start;
    volatile uint64_t workcnt = 0, workload = 0;
    int64_t workmax = 1000;
    int64_t i;

    if(argc == 1) {
        /* Calibration mode: find a workmax large enough to run for
         * WORK_PERIOD cycles. */
        printf("calibrating...\n");
        do {
            workload = 0;
            workmax *= 2;
            start = rdtsc();
#pragma omp parallel private(i,workload)
            for(i = 0; i < workmax; i++) {
#pragma omp barrier
                workload++;
            }
            now = rdtsc();
        } while(now - start < WORK_PERIOD);
        printf("workmax = %ld\n", workmax);
        return 0;
    } else {
        workmax = atol(argv[1]);
    }

    int nthreads = omp_get_max_threads();
    if(argc == 3) {
        nthreads = atoi(argv[2]);
        assert(!"REVISE!!!");
        bomp_bomp_init(nthreads);
        omp_set_num_threads(nthreads);
    }

    printf("threads %d, workmax %ld, CPUs %d\n", nthreads, workmax,
           omp_get_num_procs());

#ifdef MEASURE_SYNC
    /* NOTE(review): waits[] and ts[] assume at most 16 threads; with more,
     * the omp_get_thread_num() index overruns — confirm thread cap. */
    uint64_t waits[16] = {
        0, 1000, 1000000, 1000000000, 500, 5000000, 5000000000, 3000000,
        0, 1000, 1000000, 1000000000, 500, 5000000, 5000000000, 3000000
    };
    uint64_t ts[16][10];

    printf("before sync:\n");
#pragma omp parallel private(workcnt)
    {
        /* Skew the threads by per-thread busy-waits, then timestamp. */
        for(int j = 0; j < waits[omp_get_thread_num()]; j++) {
            workcnt++;
        }
        for(int j = 0; j < 10; j++) {
            ts[omp_get_thread_num()][j] = rdtsc();
        }
    }
    for(int j = 0; j < 10; j++) {
        printf("timestamp %d: ", j);
        for(int n = 1; n < nthreads; n++) {
            printf("%ld ", ts[n][j] - ts[n - 1][j]);
        }
        printf("\n");
    }

    printf("after sync:\n");
#pragma omp parallel
    {
        bomp_synchronize();
        for(int j = 0; j < 10; j++) {
            ts[omp_get_thread_num()][j] = rdtsc();
        }
    }
    for(int j = 0; j < 10; j++) {
        printf("timestamp %d: ", j);
        for(int n = 1; n < nthreads; n++) {
            printf("%ld ", ts[n][j] - ts[n - 1][j]);
        }
        printf("\n");
    }
#endif

#ifdef GANG_SCHEDULING
#pragma omp parallel
    {
        // bomp_synchronize();
    }
#endif

    start = rdtsc();

#ifdef MEASURE
# define MAXTHREADS 16
# define WORKMAX 10000
    static uint64_t starta[MAXTHREADS][WORKMAX];
    static uint64_t end1[MAXTHREADS][WORKMAX];
    static uint64_t end2[MAXTHREADS][WORKMAX];
#endif

    // Do some work
#pragma omp parallel private(workcnt,i)
    for(i = 0; i < workmax; i++) {
#ifdef MEASURE
        /* Clamp to the last valid slot (WORKMAX - 1); the old code used
         * WORKMAX itself, writing out of bounds. */
        starta[omp_get_thread_num()][i < WORKMAX ? i : WORKMAX - 1] = rdtsc();
#endif
        workcnt++;
#ifdef MEASURE
        end1[omp_get_thread_num()][i < WORKMAX ? i : WORKMAX - 1] = rdtsc();
#endif
#pragma omp barrier
#ifdef MEASURE
        end2[omp_get_thread_num()][i < WORKMAX ? i : WORKMAX - 1] = rdtsc();
#endif
    }
    now = rdtsc();

#ifdef MEASURE
    printf("avg compute time: ");
    for(int n = 0; n < nthreads; n++) {
        /* Seed min with the largest possible value, not a timestamp. */
        uint64_t sum = 0, min = UINT64_MAX, max = 0;
        for(i = 0; i < WORKMAX; i++) {
            uint64_t val = end1[n][i] - starta[n][i];
            sum += val;
            min = val < min ? val : min;
            max = val > max ? val : max;
        }
        printf("%lu(%lu,%lu) ", sum / WORKMAX, min, max);
    }
    printf("\n");

#if 0
    printf("wait time dump:\n");
    for(i = 0; i < WORKMAX; i++) {
        for(int n = 0; n < nthreads; n++) {
            uint64_t val = end2[n][i] - end1[n][i];
            printf("%lu ", val);
        }
        printf("\n");
    }
#endif

    printf("avg wait time: ");
    for(int n = 0; n < nthreads; n++) {
        uint64_t sum = 0, min = UINT64_MAX, max = 0;
        for(i = 0; i < WORKMAX; i++) {
            uint64_t val = end2[n][i] - end1[n][i];
            sum += val;
            min = val < min ? val : min;
            max = val > max ? val : max;
        }
        printf("%lu(%lu,%lu) ", sum / WORKMAX, min, max);
    }
    printf("\n");
#endif

    printf("%s: threads %d, compute time %lu ticks\n", argv[0], nthreads, now - start);

    /* Spin forever: keeps the domain alive on Barrelfish after reporting. */
    for(;;);
    return 0;
}
|
omp_dynamic_shared_memory.c | // RUN: %libomptarget-compile-nvptx64-nvidia-cuda
// RUN: env LIBOMPTARGET_SHARED_MEMORY_SIZE=256 \
// RUN: %libomptarget-run-nvptx64-nvidia-cuda | %fcheck-nvptx64-nvidia-cuda
// REQUIRES: nvptx64-nvidia-cuda
#include <omp.h>
#include <stdio.h>
void *llvm_omp_get_dynamic_shared();
/* libomptarget regression test: two device threads communicate through the
   last 4 bytes of the dynamic shared-memory buffer.  */
int main() {
  int x;
#pragma omp target parallel map(from : x)
  {
    /* Offset 252 into the 256-byte buffer requested via
       LIBOMPTARGET_SHARED_MEMORY_SIZE: 252 + sizeof(int) == 256.
       NOTE(review): pointer arithmetic on the void* return relies on the
       GNU void*-arithmetic extension.  */
    int *buf = llvm_omp_get_dynamic_shared() + 252;
#pragma omp barrier
    if (omp_get_thread_num() == 0)
      *buf = 1;
#pragma omp barrier
    /* Thread 1 reads what thread 0 wrote, proving the buffer is shared.  */
    if (omp_get_thread_num() == 1)
      x = *buf;
  }
  /* On the host there is no dynamic shared buffer, so the call must
     return NULL.  */
  // CHECK: PASS
  if (x == 1 && llvm_omp_get_dynamic_shared() == NULL)
    printf("PASS\n");
}
|
merging.c | #include "tools.h"
#include <string.h>
#include <stdio.h>
#include <fitsio.h>
#include <time.h>
#include <math.h>
#include <stdlib.h> /* for exit */
#include <sys/resource.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX_FILES 500
#define MAX_FILE_LENGTH 500
#define MAX_FILES_PER_THREAD 500
#ifndef NAN
#define NAN (0.0/0.0)
#endif
/**************************************************************
*
* Cubes combination with median, mean, sigma clipping, etc.
*
**************************************************************/
/* Duplicate S into freshly malloc'd storage; returns NULL on allocation
 * failure.  The caller owns the result and must free it. */
char *mystrdup (const char *s) {
    size_t bytes = strlen (s) + 1;      /* include the terminating NUL */
    char *copy = malloc (bytes);
    if (copy != NULL)
        memcpy (copy, s, bytes);
    return copy;
}
// split input files list
int split_files_list(char* input, char* filenames[]) {
int nfiles=0;
const char delim[2] = "\n";
char *token;
token = strtok(input, delim);
while( token != NULL ) {
filenames[nfiles++] = mystrdup(token);
if (nfiles > MAX_FILES) {
printf("ERROR: Too many files, limit is %d \n", MAX_FILES);
exit(EXIT_FAILURE);
}
/* printf("%3d: %s\n", nfiles, filenames[nfiles-1]); */
token = strtok(NULL, delim);
}
/* printf("nfiles: %d\n",nfiles); */
return nfiles;
}
/* Decide how many worker threads to use for merging NFILES cubes.
 * Caps the count by: the process open-file limit (with 10% headroom),
 * cfitsio's 1000-open-files limit, and the OpenMP thread count; halved
 * when TYP_VAR == 0 (each thread then opens twice as many files).
 * Always returns at least 1.
 *
 * Fixes: division by zero when nfiles == 0; a possible return of 0
 * threads (e.g. very large nfiles or the typ_var halving); errno was
 * discarded on getrlimit failure (perror reports it). */
int get_max_threads(int nfiles, int typ_var) {
    struct rlimit limit;
    /* Get max number of open files allowed for this process. */
    if (getrlimit(RLIMIT_NOFILE, &limit) != 0) {
        perror("getrlimit() failed");
        exit(EXIT_FAILURE);
    }
    if (nfiles <= 0) {
        nfiles = 1;              /* guard against division by zero */
    }
    int num_nthreads = limit.rlim_cur / nfiles * 0.9;
    if (1000/nfiles < num_nthreads) {
        /* limit of cfitsio: at most 1000 simultaneously open files */
        num_nthreads = 1000 / nfiles;
    }
    printf("num_nthreads: %d\n", num_nthreads);
    if (typ_var==0) {
        num_nthreads = num_nthreads / 2;
    }
    int nthreads=1;
#ifdef _OPENMP
#pragma omp parallel
    {
        nthreads = omp_get_num_threads();
    }
    printf("omp_get_num_threads: %d\n", nthreads);
#endif
    if (nthreads < num_nthreads) {
        num_nthreads=nthreads;
    }
    if (num_nthreads < 1) {
        num_nthreads = 1;        /* never run with zero threads */
    }
    printf("Using %d threads\n", num_nthreads);
    return num_nthreads;
}
// Open the [extname] extension of FITS file `input` as a 3-D cube.
// On success *fdata holds the opened handle and naxes[0..2] the cube shape.
// Exits the process on any CFITSIO error or if the extension is not 3-D.
int open_fits(char *input, char *extname, fitsfile **fdata, long naxes[]) {
    int status = 0; // CFITSIO status value MUST be initialized to zero!
    int naxis = 0;
    char filename[MAX_FILE_LENGTH];

    // snprintf instead of sprintf: an oversized path must not overflow
    // the fixed buffer (the truncated name then simply fails to open).
    snprintf(filename, sizeof(filename), "%s[%s]", input, extname);
    fits_open_file(fdata, filename, READONLY, &status); // open extension
    if (status) {
        fits_report_error(stderr, status);
        exit(EXIT_FAILURE);
    }
    fits_get_img_dim(*fdata, &naxis, &status); // read dimensions
    if (naxis != 3) {
        printf("Error: %s not a cube\n", filename);
        exit(EXIT_FAILURE);
    }
    fits_get_img_size(*fdata, 3, naxes, &status); // read shape
    if (status) { // previously unchecked: the size read can also fail
        fits_report_error(stderr, status);
        exit(EXIT_FAILURE);
    }
    return EXIT_SUCCESS;
}
// Compute the inclusive, 1-based [start, end] plane range handled by the
// calling thread, splitting `naxes` planes across the OpenMP team.
// limits[0] is the first plane, limits[1] the last; an empty range
// (start > end) is possible and simply skips the caller's loop.
int compute_loop_limits(long naxes, int* limits) {
#ifdef _OPENMP
    int rank = omp_get_thread_num();       // current thread number
    int nthreads = omp_get_num_threads();  // team size
#else
    int rank = 0;
    int nthreads = 1;
#endif
    if (nthreads < naxes) {
        // More planes than threads: hand each thread a contiguous chunk.
        int chunk = (int) naxes / nthreads + 1;
        int last = (rank + 1) * chunk;
        limits[0] = rank * chunk + 1;
        limits[1] = (last < naxes) ? last : (int) naxes;
    } else {
        // At least as many threads as planes: (at most) one plane each.
        int stop = rank + 2;
        limits[0] = rank + 1;
        limits[1] = (stop < naxes) ? stop : (int) naxes;
    }
    return EXIT_SUCCESS;
}
// Print a timestamped progress line. A non-negative `value` is printed
// as-is; a negative value means "derive the percentage from the current
// plane (firstpix[2]) and the thread's plane range (limits)". Negative
// (periodic) updates are rate-limited to one per 60 s via *ref, which is
// refreshed whenever a line is printed.
void report_progress(time_t *ref, long firstpix[], int limits[], float value) {
    time_t now = time(NULL);
    int forced = (value >= 0);

    if (!forced && (now - *ref) <= 60)
        return;                           // rate-limit periodic updates

    *ref = now;

    char stamp[80];
    strftime(stamp, sizeof stamp, "%x - %I:%M%p", localtime(&now));

    float pct = forced ? value
                       : firstpix[2] * 100.0 / (limits[1] - limits[0]);
    printf("%s %3.1f%%\n", stamp, pct);
    fflush(stdout);
}
/**
 * Merge several FITS data cubes pixel-by-pixel using the median.
 *
 * input:     newline-separated list of FITS file paths (modified in place
 *            by strtok inside split_files_list).
 * data:      output array (naxes[0]*naxes[1]*naxes[2] doubles) receiving the
 *            per-pixel median over all cubes (NAN where no cube is valid).
 * expmap:    output exposure map: number of cubes contributing per pixel.
 * valid_pix: per-file counters of valid (non-NaN) pixels, accumulated
 *            atomically across threads.
 *
 * Each OpenMP thread opens its own CFITSIO handles for every file and
 * processes a contiguous range of planes (see compute_loop_limits).
 * Returns EXIT_SUCCESS; any CFITSIO failure terminates the process.
 */
int mpdaf_merging_median(char* input, double* data, float* expmap, int* valid_pix)
{
    char* filenames[MAX_FILES];
    int nfiles=0;
    time_t now;
    time(&now);

    // read input files list
    nfiles = split_files_list(input, filenames);

#ifdef _OPENMP
    int num_nthreads = get_max_threads(nfiles, -1);
    omp_set_num_threads(num_nthreads); // Set number of threads to use
    // create threads
    #pragma omp parallel shared(filenames, nfiles, data, expmap, valid_pix)
    {
#endif
        // Per-thread CFITSIO handles: each thread must open its own set.
        fitsfile *fdata[MAX_FILES_PER_THREAD];
        int status = 0; // CFITSIO status value MUST be initialized to zero!
        long naxes[3] = {1,1,1}, bnaxes[3] = {1,1,1};
        int i, ii, n;
        long firstpix[3] = {1,1,1};
        int valid[nfiles];   // per-thread valid-pixel counters, merged at the end

        // read first file
        open_fits(filenames[0], "data", &fdata[0], naxes);
        #pragma omp master
        {
            printf("Read fits files\n");
            // NOTE(review): naxes elements are long but %zu expects size_t;
            // works on LP64, formally a format mismatch (%ld is correct).
            printf("naxes %zu %zu %zu\n", naxes[0], naxes[1], naxes[2]);
            report_progress(&now, NULL, NULL, 0);
        }
        // read other files and compare that the shape is the same
        for (i=1; i<nfiles; i++) {
            open_fits(filenames[i], "data", &fdata[i], bnaxes);
            if (naxes[0] != bnaxes[0] || naxes[1] != bnaxes[1] ||
                naxes[2] != bnaxes[2]) {
                printf("Error: %s don't have same size\n", filenames[i]);
                exit(EXIT_FAILURE);
            }
        }
        // start and end of the loop for the current thread
        int limits[2];
        compute_loop_limits(naxes[2], limits);
        firstpix[0] = 1;

        // initialization: one row buffer per file plus scratch arrays for
        // the per-pixel median over files
        int *indx;
        double *pix[MAX_FILES_PER_THREAD], *wdata;
        long npixels = naxes[0];   // one image row is read at a time
        for (i=0; i<nfiles; i++) {
            pix[i] = (double *) malloc(npixels * sizeof(double));
            if (pix[i] == NULL) {
                printf("Memory allocation error\n");
                exit(EXIT_FAILURE);
            }
            valid[i] = 0;
        }
        wdata = (double *) malloc(nfiles * sizeof(double));
        indx = (int *) malloc(nfiles * sizeof(int));

        // loop over this thread's planes (firstpix[2]) and rows (firstpix[1])
        for (firstpix[2] = limits[0]; firstpix[2] <= limits[1]; firstpix[2]++) {
            for (firstpix[1] = 1; firstpix[1] <= naxes[1]; firstpix[1]++) {
                // linear offset of the first pixel of this row in `data`;
                // NOTE(review): int may overflow for very large cubes.
                int index0 = (firstpix[1]-1)*naxes[0] + (firstpix[2]-1)*naxes[0]*naxes[1];
                // read the current row from every cube (an error sets
                // `status`, which is reported after the loops)
                for (i=0; i<nfiles; i++) {
                    if (fits_read_pix(fdata[i], TDOUBLE, firstpix, npixels, NULL, pix[i],
                                      NULL, &status))
                        break;
                }
                for(ii=0; ii< npixels; ii++) {
                    // gather the non-NaN samples of this pixel across files
                    n = 0;
                    for (i=0; i<nfiles; i++) {
                        if (!isnan(pix[i][ii])) {
                            wdata[n] = pix[i][ii];
                            indx[n] = n;
                            n = n + 1;
                            valid[i] = valid[i] + 1;
                        }
                    }
                    int index = ii + index0;
                    if (n==0) {
                        data[index] = NAN; //mean value
                        expmap[index] = 0; //exp map
                    } else if (n==1) {
                        data[index] = wdata[0]; //mean value
                        expmap[index] = 1; //exp map
                    } else {
                        data[index] = mpdaf_median(wdata,n,indx);
                        expmap[index] = n;
                    }
                }
            }
            // periodic progress report (master thread only, rate-limited)
            if (firstpix[2] % 100 == 0) {
                #pragma omp master
                {
                    report_progress(&now, firstpix, limits, -1);
                }
            }
        }
        // merge the per-thread counters into the shared output
        for (i=0; i<nfiles; i++) {
            #pragma omp atomic
            valid_pix[i] += valid[i];
        }
        free(wdata);
        free(indx);
        for (i=0; i<nfiles; i++) {
            free(pix[i]);
            fits_close_file(fdata[i], &status);
        }
        if (status) {
            fits_report_error(stderr, status);
            exit(EXIT_FAILURE);
        }
#ifdef _OPENMP
    }
#endif
    report_progress(&now, NULL, NULL, 100);
    return EXIT_SUCCESS;
}
// var=0: 'propagate'
// var=1: 'stat_mean'
// var=2: 'stat_one'
/**
 * Merge several FITS cubes with a sigma-clipped (optionally weighted) mean.
 *
 * input:        newline-separated list of FITS file paths (modified in place).
 * data/var:     output mean and variance cubes.
 * expmap:       output exposure map (count or sum of weights per pixel).
 * scale/offset: per-file calibration: sample = (offset[i] + pix) * scale[i].
 * weight:       per-file weights used by the weighted clipped mean.
 * selected_pix: per-file count of pixels surviving the clipping.
 * valid_pix:    per-file count of non-NaN input pixels.
 * nmax/nclip_low/nclip_up/nstop: sigma-clipping parameters.
 * typ_var:      0='propagate' (use the cubes' STAT extensions),
 *               1='stat_mean', 2='stat_one' (variance from the data).
 * mad:          1 -> clip around the median using the MAD estimator.
 *
 * Fix versus the original: the allocation check inside the typ_var==0
 * branch tested `pix[i]` instead of the freshly allocated `pixvar[i]`,
 * so a failed variance-buffer allocation went undetected.
 */
int mpdaf_merging_sigma_clipping(
    char* input,
    double* data,
    double* var,
    float* expmap,
    double* scale,
    double* offset,
    double* weight,
    int* selected_pix,
    int* valid_pix,
    int nmax,
    double nclip_low,
    double nclip_up,
    int nstop,
    int typ_var,
    int mad
    )
{
    char* filenames[MAX_FILES];
    int nfiles=0;
    time_t now;
    time(&now);

    printf("merging cube using mean with sigma clipping\n");
    printf("nmax = %d\n", nmax);
    printf("nclip_low = %f\n", nclip_low);
    printf("nclip_high = %f\n", nclip_up);
    printf("nstop = %d\n", nstop);
    printf("Using weights:\n");

    // read input files list
    nfiles = split_files_list(input, filenames);
    int j;
    for (j=0; j < nfiles; j++){
        printf("%3d: %s - weight: %f\n", j+1, filenames[j], weight[j]);
    }
    printf("nfiles: %d\n",nfiles);

#ifdef _OPENMP
    int num_nthreads = get_max_threads(nfiles, typ_var);
    omp_set_num_threads(num_nthreads); // Set number of threads to use
    // create threads
    #pragma omp parallel shared(filenames, nfiles, data, var, expmap, scale, weight, valid_pix, nmax, nclip_low, nclip_up, nstop, selected_pix, typ_var, mad)
    {
#endif
        // Per-thread CFITSIO handles for the DATA and (optional) STAT parts.
        fitsfile *fdata[MAX_FILES_PER_THREAD], *fvar[MAX_FILES_PER_THREAD];
        int status = 0; // CFITSIO status value MUST be initialized to zero!
        long naxes[3] = {1,1,1}, bnaxes[3] = {1,1,1};
        int i, ii, n;
        long firstpix[3] = {1,1,1};
        int valid[nfiles], select[nfiles];   // per-thread counters

        // read first file
        open_fits(filenames[0], "data", &fdata[0], naxes);
        #pragma omp master
        {
            printf("Read fits files\n");
            // NOTE(review): %zu vs long — formally a format mismatch.
            printf("naxes %zu %zu %zu\n", naxes[0], naxes[1], naxes[2]);
            report_progress(&now, NULL, NULL, 0);
        }
        // read other files and compare that the shape is the same
        for (i=1; i<nfiles; i++) {
            open_fits(filenames[i], "data", &fdata[i], bnaxes);
            if (naxes[0] != bnaxes[0] || naxes[1] != bnaxes[1] ||
                naxes[2] != bnaxes[2]) {
                printf("Error: %s don't have same size\n", filenames[i]);
                exit(EXIT_FAILURE);
            }
        }
        if (typ_var==0) {
            // read variance extension
            for (i=0; i<nfiles; i++) {
                open_fits(filenames[i], "stat", &fvar[i], bnaxes);
                if (naxes[0] != bnaxes[0] || naxes[1] != bnaxes[1] ||
                    naxes[2] != bnaxes[2]) {
                    printf("Error: %s don't have same size\n", filenames[i]);
                    exit(EXIT_FAILURE);
                }
            }
        }
        // start and end of the loop for the current thread
        int limits[2];
        compute_loop_limits(naxes[2], limits);
        firstpix[0] = 1;

        // initialization: one plane buffer per file plus per-pixel scratch
        double *pix[MAX_FILES_PER_THREAD], *pixvar[MAX_FILES_PER_THREAD], *wdata, *wweight, *wvar=NULL;
        int *indx, *files_id;
        double x[4];   // results of the clipped mean: [mean, sigma, count, wsum]
        long npixels = naxes[0] * naxes[1];   // a whole plane is read at a time
        for (i=0; i<nfiles; i++)
        {
            pix[i] = (double *) malloc(npixels * sizeof(double));
            if (pix[i] == NULL) {
                printf("Memory allocation error\n");
                exit(EXIT_FAILURE);
            }
            valid[i] = 0;
            select[i] = 0;
        }
        if (typ_var==0)
        {
            for (i=0; i<nfiles; i++)
            {
                pixvar[i] = (double *) malloc(npixels * sizeof(double));
                if (pixvar[i] == NULL) {   // BUGFIX: original tested pix[i]
                    printf("Memory allocation error\n");
                    exit(EXIT_FAILURE);
                }
            }
            wvar = (double *) malloc(nfiles * sizeof(double));
        }
        wdata = (double *) malloc(nfiles * sizeof(double));
        wweight = (double *) malloc(nfiles * sizeof(double));
        indx = (int *) malloc(nfiles * sizeof(int));
        files_id = (int *) malloc(nfiles * sizeof(int));

        // loop over this thread's planes
        for (firstpix[2] = limits[0]; firstpix[2] <= limits[1]; firstpix[2]++)
        {
            int index0 = (firstpix[2] - 1) * npixels;
            // read data values for the current plane, and optionally stat
            for (i=0; i<nfiles; i++) {
                if (fits_read_pix(fdata[i], TDOUBLE, firstpix, npixels,
                                  NULL, pix[i], NULL, &status))
                    break;
            }
            if (typ_var==0) {
                for (i=0; i<nfiles; i++) {
                    if (fits_read_pix(fvar[i], TDOUBLE, firstpix, npixels,
                                      NULL, pixvar[i], NULL, &status))
                        break;
                }
            }
            for(ii=0; ii< npixels; ii++) {
                n = 0; // Only the non-nan pixels will increase n
                for (i=0; i<nfiles; i++) {
                    if (!isnan(pix[i][ii])) {
                        // calibrated sample, its weight, and its file of origin
                        wdata[n] = (offset[i] + pix[i][ii]) * scale[i];
                        wweight[n] = weight[i];
                        files_id[n] = i;
                        indx[n] = n;
                        if (typ_var==0) {
                            wvar[n] = pixvar[i][ii] * scale[i] * scale[i];
                        }
                        n += 1;
                        // Need to here somehow keep track of the weights
                        valid[i] += 1;
                    }
                }
                int index = ii + index0;
                if (n==0) {
                    data[index] = NAN; //mean value
                    expmap[index] = 0; //exp map
                    var[index] = NAN; //var
                } else if (n==1) {
                    data[index] = wdata[0]; //mean value
                    expmap[index] = 1. / wweight[0]; //exp map // FIXME here also the weighting needs to be kept track abouta
                    if (typ_var==0) //var
                        var[index] = wvar[0];
                    else
                        var[index] = NAN;
                    select[files_id[0]] += 1;
                } else {
                    if (mad==1) {
                        // FIXME: Weighting not yet implemented ..
                        mpdaf_mean_madsigma_clip(wdata, n, x, nmax, nclip_low,
                                                 nclip_up, nstop, indx);
                        expmap[index] = x[2]; // exp map; (simple sum of files)
                    } else {
                        mpdaf_weighted_mean_sigma_clip(wdata, wweight, n, x, nmax, nclip_low,
                                                       nclip_up, nstop, indx);
                        expmap[index] = x[3]; // exp map (sum of weights)
                    }
                    data[index] = x[0]; // mean value
                    if (typ_var==0) { // var
                        /* Note the index has changed during the clipping, that is why this is possible */
                        var[index] = mpdaf_weighted_mean_var(wvar, wweight, x[2], indx);
                    } else {
                        if (x[2]>1) {
                            var[index] = (x[1] * x[1]);
                            if (typ_var==1) {
                                var[index] /= (x[2] - 1);
                            }
                        } else {
                            var[index] = NAN;
                        }
                    }
                    // count the surviving (unclipped) samples per file
                    for (i=0; i<x[2]; i++) {
                        select[files_id[indx[i]]] += 1;
                    }
                }
            }
            // periodic progress report (master thread only, rate-limited)
            if (firstpix[2] % 100 == 0) {
                #pragma omp master
                {
                    report_progress(&now, firstpix, limits, -1);
                }
            }
        }
        // merge the per-thread counters into the shared outputs
        for (i=0; i<nfiles; i++) {
            #pragma omp atomic
            valid_pix[i] += valid[i];
            #pragma omp atomic
            selected_pix[i] += select[i];
        }
        free(wdata);
        free(wweight);
        free(indx);
        free(files_id);
        for (i=0; i<nfiles; i++) {
            free(pix[i]);
            fits_close_file(fdata[i], &status);
        }
        if (typ_var==0) {
            free(wvar);
            for (i=0; i<nfiles; i++) {
                free(pixvar[i]);
                fits_close_file(fvar[i], &status);
            }
        }
        if (status) {
            fits_report_error(stderr, status);
            exit(EXIT_FAILURE);
        }
#ifdef _OPENMP
    }
#endif
    report_progress(&now, NULL, NULL, 100);
    return EXIT_SUCCESS;
}
|
GB_unop__one_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__one_int16_int16
// op(A') function: GB_unop_tran__one_int16_int16
// C type: int16_t
// A type: int16_t
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = 1 ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = 1 for all p: apply the generated ONE unary operator.
// The operator ignores the values of Ax entirely, so Cx and Ax may alias.
// Auto-generated file: the empty ";" statements are the vacuous
// GETA/CAST template steps for this operator.
GrB_Info GB_unop_apply__one_int16_int16
(
    int16_t *Cx,        // Cx and Ax may be aliased
    const int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Each iteration is independent; static scheduling over anz entries.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        ; ;
        ; ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the ONE operator.
// The actual transpose loop lives in the shared template
// "GB_unop_transpose.c", specialized here via the GB_* macros above.
GrB_Info GB_unop_tran__one_int16_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp-graph.h | #ifndef __OMP_GRAPH_RUNTIME_H__
#define __OMP_GRAPH_RUNTIME_H__
#include "../data-structures/data-structures.h"
namespace __core__ {
namespace __wavefront__ {
namespace __runtime__ {
// Selects which task-graph execution strategy the ompGraph() overloads use.
// NOTE(review): identifiers with leading double underscores are reserved for
// the implementation; renaming would be safer but is an interface change.
enum __ompGraphVersion__ {
    OMPOrderedGraph,     // spawn tasks in vertex-index order
    OMPTopologicalSort,  // spawn tasks in topological order (Kahn-style)
    OMPUserOrder,        // spawn tasks in a caller-supplied order
    OMPUserOrderDebug    // OMPUserOrder + per-task timing/thread recording
};
typedef __ompGraphVersion__ ompGraphVersion;
// OMPOrderedGraph strategy: walk vertices in index order and spawn one task
// per vertex; dependencies between tasks are expressed via depend clauses on
// slots of the CSR ptr array (in: the adjacent slots, out: the vertex's own).
template <ompGraphVersion version=OMPOrderedGraph,typename FT=void,typename VertexType=void,typename EdgeType=void,typename Allocator=void,typename IT=int,typename...Args,enable_IT<eq_CE(version,OMPOrderedGraph)> = 0>
void ompGraph(GraphCXS<VertexType,EdgeType,Allocator,IT>& graph,FT& function,int threadnum,Args...args) {
    IT* ptr=graph.ptr();
#pragma omp parallel num_threads(threadnum)
    {
        // One thread creates the tasks; the team executes them.
#pragma omp single
        {
            for(size_t i=0;i<graph.v();++i) {
                IT pos=graph.ptr(i),size=graph.ptr(i+1)-graph.ptr(i);
                if(size>0) {
#pragma omp task depend(iterator(it=0:size), in:ptr[graph.indxs(pos+it)]) depend(out:ptr[i])
                    {
                        function(i,omp_get_thread_num(),args...);
                    }
                }
                else
                    // NOTE(review): stops at the first vertex with no
                    // adjacency — this assumes such vertices are grouped at
                    // the end of the index order, and their `function` is
                    // never invoked. Confirm this is intended: the
                    // OMPUserOrder variants create a task in this case.
                    break;
            }
        }
    }
}
// OMPTopologicalSort strategy: Kahn's algorithm orders task CREATION
// topologically (indegree counting + a stack of ready vertices); actual
// execution ordering is still enforced by the tasks' depend clauses.
// Fixes versus the original: a call to the undefined helper fn(v,graph,...)
// (which made this specialization fail to compile when instantiated) has
// been removed, and vertices without adjacency now get a task, matching the
// OMPUserOrder variants (previously their `function` was never invoked).
template <ompGraphVersion version=OMPOrderedGraph,typename FT=void,typename VertexType=void,typename EdgeType=void,typename Allocator=void,typename IT=int,typename...Args,enable_IT<eq_CE(version,OMPTopologicalSort)> = 0>
void ompGraph(GraphCXS<VertexType,EdgeType,Allocator,IT>& graph,FT& function,int threadnum,Args...args) {
    IT* ptr=graph.ptr();
    // Count incoming references for every vertex.
    std::vector<int> indegree(graph.v(),0);
    for(size_t i=0;i<graph.e();++i)
        indegree[graph.indxs(i)]+=1;
    // Seed with all vertices that have no incoming references.
    std::stack<IT> s;
    for(size_t i=0;i<graph.v();++i)
        if(indegree[i]==0)
            s.push(i);
#pragma omp parallel num_threads(threadnum)
    {
#pragma omp single
        {
            while(!s.empty()) {
                IT v=s.top();
                s.pop();
                IT pos=graph.ptr(v),size=graph.ptr(v+1)-graph.ptr(v);
                if(size>0) {
#pragma omp task depend(iterator(it=0:size), in:ptr[graph.indxs(pos+it)]) depend(out:ptr[v])
                    {
                        function(v,omp_get_thread_num(),args...);
                    }
                }
                else {
                    // No adjacency: the task only publishes ptr[v].
#pragma omp task depend(out:ptr[v])
                    {
                        function(v,omp_get_thread_num(),args...);
                    }
                }
                // Release v's neighbors (no-op when size==0).
                for(IT j=graph.ptr(v);j<graph.ptr(v+1);++j) {
                    indegree[graph.indxs(j)]-=1;
                    if(indegree[graph.indxs(j)]==0)
                        s.push(graph.indxs(j));
                }
            }
        }
    }
}
// OMPUserOrder strategy: spawn one task per vertex following the
// caller-supplied `order`; the graph's edges are encoded as depend
// clauses on slots of the CSR ptr array. Team size fixed by `threadnum`.
template <ompGraphVersion version=OMPOrderedGraph,typename FT=void,typename VertexType=void,typename EdgeType=void,typename Allocator=void,typename IT=int,typename...Args,enable_IT<eq_CE(version,OMPUserOrder)> = 0>
void ompGraph(GraphCXS<VertexType,EdgeType,Allocator,IT>& graph,FT& function,const std::vector<int>& order,int threadnum,Args...args) {
    IT* ptr=graph.ptr();
#pragma omp parallel num_threads(threadnum)
    {
        // A single thread creates all tasks; the team runs them.
#pragma omp single
        {
            for(int v : order) {
                const IT first=graph.ptr(v);
                const IT degree=graph.ptr(v+1)-first;
                if(degree>0) {
#pragma omp task depend(iterator(it=0:degree), in:ptr[graph.indxs(first+it)]) depend(out:ptr[v])
                    function(v,omp_get_thread_num(),args...);
                } else {
                    // No adjacency: the task only publishes ptr[v].
#pragma omp task depend(out:ptr[v])
                    function(v,omp_get_thread_num(),args...);
                }
            }
        }
    }
}
// OMPUserOrder strategy without an explicit thread count: identical to the
// overload above but lets the runtime choose the team size.
template <ompGraphVersion version=OMPOrderedGraph,typename FT=void,typename VertexType=void,typename EdgeType=void,typename Allocator=void,typename IT=int,typename...Args,enable_IT<eq_CE(version,OMPUserOrder)> = 0>
void ompGraph(GraphCXS<VertexType,EdgeType,Allocator,IT>& graph,FT& function,const std::vector<int>& order,Args...args) {
    IT* ptr=graph.ptr();
#pragma omp parallel
    {
        // A single thread creates all tasks; the team runs them.
#pragma omp single
        {
            for(int v : order) {
                const IT first=graph.ptr(v);
                const IT degree=graph.ptr(v+1)-first;
                if(degree>0) {
#pragma omp task depend(iterator(it=0:degree), in:ptr[graph.indxs(first+it)]) depend(out:ptr[v])
                    function(v,omp_get_thread_num(),args...);
                } else {
                    // No adjacency: the task only publishes ptr[v].
#pragma omp task depend(out:ptr[v])
                    function(v,omp_get_thread_num(),args...);
                }
            }
        }
    }
}
// OMPUserOrderDebug: same scheduling as OMPUserOrder, but each task also
// records its wall-clock execution time and executing thread id into the
// vertex (graph[v].etime / graph[v].tid) for post-mortem analysis.
template <ompGraphVersion version=OMPOrderedGraph,typename FT=void,typename VertexType=void,typename EdgeType=void,typename Allocator=void,typename IT=int,typename...Args,enable_IT<eq_CE(version,OMPUserOrderDebug)> = 0>
void ompGraph(GraphCXS<VertexType,EdgeType,Allocator,IT>& graph,FT& function,const std::vector<int>& order,int threadnum,Args...args) {
    IT* ptr=graph.ptr();
#pragma omp parallel num_threads(threadnum)
    {
#pragma omp single
        {
            for(size_t i=0;i<order.size();++i) {
                int v=order[i];
                IT pos=graph.ptr(v),size=graph.ptr(v+1)-graph.ptr(v);
                if(size>0){
                    // task waits on v's adjacent ptr slots, publishes ptr[v]
#pragma omp task depend(iterator(it=0:size), in:ptr[graph.indxs(pos+it)]) depend(out:ptr[v])
                    {
                        cpu_timer timer;
                        int tid=omp_get_thread_num();
                        timer.start();
                        function(v,tid,args...);
                        timer.stop();
                        graph[v].etime=timer.elapsed_time();
                        graph[v].tid=tid;
                    }
                }
                else {
                    // no adjacency: the task only publishes ptr[v]
#pragma omp task depend(out:ptr[v])
                    {
                        cpu_timer timer;
                        int tid=omp_get_thread_num();
                        timer.start();
                        function(v,tid,args...);
                        timer.stop();
                        graph[v].etime=timer.elapsed_time();
                        graph[v].tid=tid;
                    }
                }
            }
        }
    }
}
// OMPUserOrderDebug without an explicit thread count: identical to the
// overload above but lets the runtime choose the team size.
template <ompGraphVersion version=OMPOrderedGraph,typename FT=void,typename VertexType=void,typename EdgeType=void,typename Allocator=void,typename IT=int,typename...Args,enable_IT<eq_CE(version,OMPUserOrderDebug)> = 0>
void ompGraph(GraphCXS<VertexType,EdgeType,Allocator,IT>& graph,FT& function,const std::vector<int>& order,Args...args) {
    IT* ptr=graph.ptr();
#pragma omp parallel
    {
#pragma omp single
        {
            for(size_t i=0;i<order.size();++i) {
                int v=order[i];
                IT pos=graph.ptr(v),size=graph.ptr(v+1)-graph.ptr(v);
                if(size>0){
                    // task waits on v's adjacent ptr slots, publishes ptr[v]
#pragma omp task depend(iterator(it=0:size), in:ptr[graph.indxs(pos+it)]) depend(out:ptr[v])
                    {
                        cpu_timer timer;
                        int tid=omp_get_thread_num();
                        timer.start();
                        function(v,tid,args...);
                        timer.stop();
                        graph[v].etime=timer.elapsed_time();
                        graph[v].tid=tid;
                    }
                }
                else {
                    // no adjacency: the task only publishes ptr[v]
#pragma omp task depend(out:ptr[v])
                    {
                        cpu_timer timer;
                        int tid=omp_get_thread_num();
                        timer.start();
                        function(v,tid,args...);
                        timer.stop();
                        graph[v].etime=timer.elapsed_time();
                        graph[v].tid=tid;
                    }
                }
            }
        }
    }
}
//template <ompGraphVersion version=OMPOrderedGraph,typename FT=void,typename VertexType=void,typename EdgeType=void,typename Allocator=void,typename IT=int,typename...Args,enable_IT<eq_CE(version,OMPUserOrder)> = 0>
//void ompGraph(GraphCXS<VertexType,EdgeType,Allocator,IT>& graph,FT& function,const std::vector<int>& order,int threadnum,Args...args) {
// IT* ptr=graph.ptr();
//#pragma omp parallel num_threads(threadnum)
// {
// #pragma omp single
// {
// for(size_t i=0;i<order.size();++i) {
// int v=order[i];
// IT pos=graph.ptr(v),size=graph.ptr(v+1)-graph.ptr(v);
//#pragma omp task depend(iterator(it=0:size), in:ptr[graph.indxs(pos+it)]) depend(out:ptr[v])
// {
// function(v,omp_get_thread_num(),args...);
// }
// }
// }
// }
//}
//template <ompGraphVersion version=OMPOrderedGraph,typename FT=void,typename VertexType=void,typename EdgeType=void,typename Allocator=void,typename IT=int,typename...Args,enable_IT<eq_CE(version,OMPUserOrderDebug)> = 0>
//void ompGraph(GraphCXS<VertexType,EdgeType,Allocator,IT>& graph,FT& function,const std::vector<int>& order,int threadnum,Args...args) {
// IT* ptr=graph.ptr();
//#pragma omp parallel num_threads(threadnum)
// {
// #pragma omp single
// {
// for(size_t i=0;i<order.size();++i) {
// int v=order[i];
// IT pos=graph.ptr(v),size=graph.ptr(v+1)-graph.ptr(v);
//#pragma omp task depend(iterator(it=0:size), in:ptr[graph.indxs(pos+it)]) depend(out:ptr[v])
// {
// cpu_timer timer;
// int tid=omp_get_thread_num();
// timer.start();
// function(v,tid,args...);
// timer.stop();
// graph[v].etime=timer.elapsed_time();
// graph[v].tid=tid;
// }
// }
// }
// }
//}
}
}
}
#endif
|
/*
 * Sum of squared differences between control and test curves:
 *   result_mean[0] = sum_i (control_mean[i] - test_mean[i])^2
 *   result_sd[0]   = sum_i (control_sd[i]   - test_sd[i])^2
 * over tsamp samples. Parallelized with an OpenMP sum reduction when
 * compiled with OpenMP support; single-threaded otherwise.
 */
void result_control(unsigned int tsamp , double *control_mean, double *control_sd, double *test_mean, double *test_sd, double *result_mean, double *result_sd){
    double sum_mean = 0.0;
    double sum_sd = 0.0;

#pragma omp parallel for reduction (+: sum_mean, sum_sd)
    for(unsigned int i = 0; i < tsamp; i++){
        const double dm = control_mean[i] - test_mean[i];
        const double ds = control_sd[i] - test_sd[i];
        sum_mean += dm * dm;
        sum_sd += ds * ds;
    }

    result_mean[0] = sum_mean;
    result_sd[0] = sum_sd;
}
|
pinvr.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB SP code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
//---------------------------------------------------------------------
// block-diagonal matrix-vector multiplication
//---------------------------------------------------------------------
// Applies the constant 5x5 block transform (built from bt) to the five
// rhs components at every interior grid point. Loop-scoped locals make
// the per-iteration variables private without an explicit private() list.
void pinvr()
{
  if (timeron) timer_start(t_pinvr);

  #pragma omp parallel for default(shared)
  for (int k = 1; k <= nz2; k++) {
    for (int j = 1; j <= ny2; j++) {
      for (int i = 1; i <= nx2; i++) {
        const double r1 = rhs[k][j][i][0];
        const double r2 = rhs[k][j][i][1];
        const double r3 = rhs[k][j][i][2];
        const double r4 = rhs[k][j][i][3];
        const double r5 = rhs[k][j][i][4];

        const double t1 = bt * r1;
        const double t2 = 0.5 * ( r4 + r5 );

        rhs[k][j][i][0] = bt * ( r4 - r5 );
        rhs[k][j][i][1] = -r3;
        rhs[k][j][i][2] = r2;
        rhs[k][j][i][3] = -t1 + t2;
        rhs[k][j][i][4] = t1 + t2;
      }
    }
  }

  if (timeron) timer_stop(t_pinvr);
}
|
mixedulm_linear_solver.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_MIXEDULM_SOLVER_H_INCLUDED )
#define KRATOS_MIXEDULM_SOLVER_H_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <sstream>
#include <cstddef>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "linear_solvers/reorderer.h"
#include "linear_solvers/iterative_solver.h"
#include "utilities/openmp_utils.h"
#include "contact_structural_mechanics_application_variables.h"
#include "utilities/sparse_matrix_multiplication_utility.h"
#include "custom_utilities/logging_settings.hpp"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class MixedULMLinearSolver
* @ingroup ContactStructuralMechanicsApplication
* @brief This solver is designed for the solution of mixed U-LM problems (this solver in particular is optimized for dual LM, to avoid the resolution).
* @details It uses a block structure diving the matrix in UU LMLM ULM LMU blocks
* and uses "standard" linear solvers for the different blocks as well as a GMRES for the outer part
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpaceType, class TDenseSpaceType,
class TPreconditionerType = Preconditioner<TSparseSpaceType, TDenseSpaceType>,
class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> >
class MixedULMLinearSolver :
public IterativeSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>
{
public:
///@}
///@name Enums
///@{
/// This enum is used to identify each index whick kind is
enum class BlockType {
OTHER,
MASTER,
SLAVE_INACTIVE,
SLAVE_ACTIVE,
LM_INACTIVE,
LM_ACTIVE
};
///@name Type Definitions
///@{
/// Pointer definition of MixedULMLinearSolver
KRATOS_CLASS_POINTER_DEFINITION (MixedULMLinearSolver);
/// The base class corresponds to the an iterative solver
typedef IterativeSolver<TSparseSpaceType, TDenseSpaceType, TPreconditionerType, TReordererType> BaseType;
/// The base class for the linear solver
typedef LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType> LinearSolverType;
/// The pointer to a linear solver
typedef typename LinearSolverType::Pointer LinearSolverPointerType;
/// The sparse matrix type
typedef typename TSparseSpaceType::MatrixType SparseMatrixType;
/// The vector type
typedef typename TSparseSpaceType::VectorType VectorType;
/// The dense matrix type
typedef typename TDenseSpaceType::MatrixType DenseMatrixType;
/// The dense vector type
typedef typename TDenseSpaceType::VectorType DenseVectorType;
/// The node type
typedef Node<3> NodeType;
/// The definition of the dof type
typedef typename ModelPart::DofType DofType;
/// The array containing the dofs
typedef typename ModelPart::DofsArrayType DofsArrayType;
/// An array of conditions
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
/// An array of nodes
typedef ModelPart::NodesContainerType NodesArrayType;
/// The size type
typedef std::size_t SizeType;
/// The index type
typedef std::size_t IndexType;
/// A vector of indexes
typedef DenseVector<IndexType> IndexVectorType;
/// A vector of types
typedef DenseVector<BlockType> BlockTypeVectorType;
static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
    /**
     * @brief Default constructor
     * @param pSolverDispBlock The linear solver used for the displacement block
     * @param MaxTolerance The maximal tolerance considered
     * @param MaxIterationNumber The maximal number of iterations
     */
    MixedULMLinearSolver (
        LinearSolverPointerType pSolverDispBlock,
        const double MaxTolerance,
        const std::size_t MaxIterationNumber
        ) : BaseType (MaxTolerance, MaxIterationNumber),
            mpSolverDispBlock(pSolverDispBlock)
    {
        // Blocks are extracted lazily, on the first InitializeSolutionStep
        mBlocksAreAllocated = false;
        mIsInitialized = false;
    }
    /**
     * @brief Second constructor, it uses a Kratos parameters as input instead of direct input
     * @param pSolverDispBlock The linear solver used for the displacement block
     * @param ThisParameters The configuration parameters considered
     */
    MixedULMLinearSolver(
        LinearSolverPointerType pSolverDispBlock,
        Parameters ThisParameters = Parameters(R"({})")
        ): BaseType (),
           mpSolverDispBlock(pSolverDispBlock)
    {
        KRATOS_TRY

        // Now validate against defaults -- this also ensures no type mismatch
        Parameters default_parameters = GetDefaultParameters();
        ThisParameters.ValidateAndAssignDefaults(default_parameters);

        // Initializing the remaining variables
        this->SetTolerance( ThisParameters["tolerance"].GetDouble() );
        this->SetMaxIterationsNumber( ThisParameters["max_iteration_number"].GetInt() );
        mEchoLevel = ThisParameters["echo_level"].GetInt();
        mBlocksAreAllocated = false;
        mIsInitialized = false;

        KRATOS_CATCH("")
    }
    /// Copy constructor.
    /// Copies the full internal state, including the cached block matrices,
    /// index maps and work vectors of the other solver.
    MixedULMLinearSolver (const MixedULMLinearSolver& rOther)
        : BaseType(rOther),
          mpSolverDispBlock(rOther.mpSolverDispBlock),
          mBlocksAreAllocated(rOther.mBlocksAreAllocated),
          mIsInitialized(rOther.mIsInitialized),
          mMasterIndices(rOther.mMasterIndices),
          mSlaveInactiveIndices(rOther.mSlaveInactiveIndices),
          mSlaveActiveIndices(rOther.mSlaveActiveIndices),
          mLMInactiveIndices(rOther.mLMInactiveIndices),
          mLMActiveIndices(rOther.mLMActiveIndices),
          mOtherIndices(rOther.mOtherIndices),
          mGlobalToLocalIndexing(rOther.mGlobalToLocalIndexing),
          mWhichBlockType(rOther.mWhichBlockType),
          mKDispModified(rOther.mKDispModified),
          mKLMAModified(rOther.mKLMAModified),
          mKLMIModified(rOther.mKLMIModified),
          mKSAN(rOther.mKSAN),
          mKSAM(rOther.mKSAM),
          mKSASI(rOther.mKSASI),
          mKSASA(rOther.mKSASA),
          mPOperator(rOther.mPOperator),
          mCOperator(rOther.mCOperator),
          mResidualLMActive(rOther.mResidualLMActive),
          mResidualLMInactive(rOther.mResidualLMInactive),
          mResidualDisp(rOther.mResidualDisp),
          mLMActive(rOther.mLMActive),
          mLMInactive(rOther.mLMInactive),
          mDisp(rOther.mDisp),
          mEchoLevel(rOther.mEchoLevel),
          mFileCreated(rOther.mFileCreated)
    {
    }
    /// Destructor.
    ~MixedULMLinearSolver() override {}

    ///@}
    ///@name Operators
    ///@{

    /// Assignment operator.
    // NOTE(review): performs no copy at all — assigning one solver to
    // another leaves the target unchanged. Confirm this is the intended
    // semantics (it differs from the copy constructor above).
    MixedULMLinearSolver& operator= (const MixedULMLinearSolver& Other)
    {
        return *this;
    }
///@}
///@name Operations
///@{
/**
* @brief This function is designed to be called as few times as possible. It creates the data structures
* that only depend on the connectivity of the matrix (and not on its coefficients)
* @details So that the memory can be allocated once and expensive operations can be done only when strictly
* needed
* @param rA System matrix
* @param rX Solution vector. it's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
    void Initialize (
        SparseMatrixType& rA,
        VectorType& rX,
        VectorType& rB
        ) override
    {
        // The inner displacement solver can only be initialized once the
        // block matrices exist; otherwise defer to InitializeSolutionStep.
        if (mBlocksAreAllocated == true) {
            mpSolverDispBlock->Initialize(mKDispModified, mDisp, mResidualDisp);
            mIsInitialized = true;
        } else
            KRATOS_DETAIL("MixedULM Initialize") << "Linear solver intialization is deferred to the moment at which blocks are available" << std::endl;
    }
/**
* @brief This function is designed to be called every time the coefficients change in the system
* that is, normally at the beginning of each solve.
* @details For example if we are implementing a direct solver, this is the place to do the factorization
* so that then the backward substitution can be performed effectively more than once
* @param rA System matrix
* @param rX Solution vector. it's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
void InitializeSolutionStep (
SparseMatrixType& rA,
VectorType& rX,
VectorType& rB
) override
{
// Copy to local matrices
if (mBlocksAreAllocated == false) {
FillBlockMatrices (true, rA, rX, rB);
mBlocksAreAllocated = true;
} else {
FillBlockMatrices (false, rA, rX, rB);
mBlocksAreAllocated = true;
}
if(mIsInitialized == false)
this->Initialize(rA,rX,rB);
mpSolverDispBlock->InitializeSolutionStep(mKDispModified, mDisp, mResidualDisp);
}
/**
* @brief This function actually performs the solution work, eventually taking advantage of what was done before in the
* @details Initialize and InitializeSolutionStep functions.
* @param rA System matrix
* @param rX Solution vector. it's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
    void PerformSolutionStep (
        SparseMatrixType& rA,
        VectorType& rX,
        VectorType& rB
        ) override
    {
        // Auxiliar size
        const SizeType lm_active_size = mLMActiveIndices.size();
        const SizeType lm_inactive_size = mLMInactiveIndices.size();
        const SizeType total_disp_size = mOtherIndices.size() + mMasterIndices.size() + mSlaveInactiveIndices.size() + mSlaveActiveIndices.size();

        // Get the u and lm residuals
        GetUPart (rB, mResidualDisp);

        // Solve u block
        if (mDisp.size() != total_disp_size)
            mDisp.resize(total_disp_size, false);
        mpSolverDispBlock->Solve (mKDispModified, mDisp, mResidualDisp);

        // Write back solution
        SetUPart(rX, mDisp);

        // Solve LM: the LM blocks are applied with a plain mat-vec product
        // (LM = D⁻1*rLM), i.e. mKLMAModified/mKLMIModified presumably hold
        // the already-inverted diagonal blocks — see FillBlockMatrices.
        if (lm_active_size > 0) {
            // Now we compute the residual of the LM
            GetLMAPart (rB, mResidualLMActive);

            // LM = D⁻1*rLM
            if (mLMActive.size() != lm_active_size)
                mLMActive.resize(lm_active_size, false);
            TSparseSpaceType::Mult (mKLMAModified, mResidualLMActive, mLMActive);

            // Write back solution
            SetLMAPart(rX, mLMActive);
        }

        if (lm_inactive_size > 0) {
            // Now we compute the residual of the LM
            GetLMIPart (rB, mResidualLMInactive);

            // LM = D⁻1*rLM
            if (mLMInactive.size() != lm_inactive_size)
                mLMInactive.resize(lm_inactive_size, false);
            TSparseSpaceType::Mult (mKLMIModified, mResidualLMInactive, mLMInactive);

            // Write back solution
            SetLMIPart(rX, mLMInactive);
        }
    }
/**
* @brief This function is designed to be called at the end of the solve step.
* @details For example this is the place to remove any data that we do not want to save for later
* @param rA System matrix
* @param rX Solution vector. it's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
void FinalizeSolutionStep (
SparseMatrixType& rA,
VectorType& rX,
VectorType& rB
) override
{
// Forward the finalization to the displacement block solver, using the condensed
// system (mKDispModified/mDisp/mResidualDisp) rather than the global rA/rX/rB
mpSolverDispBlock->FinalizeSolutionStep(mKDispModified, mDisp, mResidualDisp);
}
/**
* @brief This function is designed to clean up all internal data in the solver.
* @details Clear is designed to leave the solver object as if newly created. After a clear a new Initialize is needed
*/
void Clear() override
{
// Force block re-allocation on the next InitializeSolutionStep
mBlocksAreAllocated = false;
mpSolverDispBlock->Clear();
// We clear the matrices and vectors
mKDispModified.clear(); /// The modified displacement block
mKLMAModified.clear(); /// The modified active LM block (diagonal)
mKLMIModified.clear(); /// The modified inactive LM block (diagonal)
mKSAN.clear(); /// The slave active-displacement block
mKSAM.clear(); /// The active slave-master block
mKSASI.clear(); /// The active slave-inactive slave block
mKSASA.clear(); /// The active slave-slave active block
mPOperator.clear(); /// The operator used for the master blocks
mCOperator.clear(); /// The operator used for the active slave block
mResidualLMActive.clear(); /// The residual corresponding to the active LM
mResidualLMInactive.clear(); /// The residual corresponding to the inactive LM
mResidualDisp.clear(); /// The residual of the displacements
mLMActive.clear(); /// The solution of the active LM
mLMInactive.clear(); /// The solution of the inactive LM
mDisp.clear(); /// The solution of the displacement
// A fresh Initialize is required before the next solve
mIsInitialized = false;
}
/**
* @brief Normal solve method.
* @details Solves the linear system Ax=b and puts the result on SystemVector& rX. rX is also the initial guess for iterative methods.
* @param rA System matrix
* @param rX Solution vector. it's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
bool Solve(
    SparseMatrixType& rA,
    VectorType& rX,
    VectorType& rB
    ) override
{
    // Dump the system before condensation, depending on verbosity
    if (mEchoLevel == 2) { // RHS only
        KRATOS_INFO("RHS BEFORE CONDENSATION") << "RHS = " << rB << std::endl;
    } else if (mEchoLevel == 3) { // LHS and RHS
        KRATOS_INFO("LHS BEFORE CONDENSATION") << "SystemMatrix = " << rA << std::endl;
        KRATOS_INFO("RHS BEFORE CONDENSATION") << "RHS = " << rB << std::endl;
    } else if (mEchoLevel >= 4) { // Write to MatrixMarket files
        const std::string mm_lhs_name = "before_condensation_A_" + std::to_string(mFileCreated) + ".mm";
        TSparseSpaceType::WriteMatrixMarketMatrix(mm_lhs_name.c_str(), rA, false);
        const std::string mm_rhs_name = "before_condensation_b_" + std::to_string(mFileCreated) + ".mm.rhs";
        TSparseSpaceType::WriteMatrixMarketVector(mm_rhs_name.c_str(), rB);
    }
    // Full solution sequence: lazy initialization, per-step setup, solve, finalize
    if (mIsInitialized == false)
        this->Initialize (rA,rX,rB);
    this->InitializeSolutionStep (rA,rX,rB);
    this->PerformSolutionStep (rA,rX,rB);
    this->FinalizeSolutionStep (rA,rX,rB);
    // Dump the condensed system and the obtained solution, depending on verbosity
    if (mEchoLevel == 2) { // Solution and RHS only
        KRATOS_INFO("Dx") << "Solution obtained = " << mDisp << std::endl;
        KRATOS_INFO("RHS") << "RHS = " << mResidualDisp << std::endl;
    } else if (mEchoLevel == 3) { // LHS, solution and RHS
        KRATOS_INFO("LHS") << "SystemMatrix = " << mKDispModified << std::endl;
        KRATOS_INFO("Dx") << "Solution obtained = " << mDisp << std::endl;
        KRATOS_INFO("RHS") << "RHS = " << mResidualDisp << std::endl;
    } else if (mEchoLevel >= 4) { // Write to MatrixMarket files
        const std::string mm_lhs_name = "A_" + std::to_string(mFileCreated) + ".mm";
        TSparseSpaceType::WriteMatrixMarketMatrix(mm_lhs_name.c_str(), mKDispModified, false);
        const std::string mm_rhs_name = "b_" + std::to_string(mFileCreated) + ".mm.rhs";
        TSparseSpaceType::WriteMatrixMarketVector(mm_rhs_name.c_str(), mResidualDisp);
        mFileCreated++;
    }
    // NOTE(review): preserved from the original — this method always returns false;
    // confirm that callers ignore the return value before changing it
    return false;
}
/**
* @brief Multi solve method for solving a set of linear systems with same coefficient matrix.
* @details Solves the linear system Ax=b and puts the result on SystemVector& rX. rX is also the initial guess for iterative methods.
* @param rA System matrix
* @param rX Solution vector. it's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
bool Solve (
SparseMatrixType& rA,
DenseMatrixType& rX,
DenseMatrixType& rB
) override
{
// Multi-RHS solve is not implemented by this solver: it unconditionally reports failure
// without touching rX
return false;
}
/**
* @brief Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example
* when solving a mixed u-p problem, it is important to identify the row associated to v and p.
* @details Another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers
* which require knowledge on the spatial position of the nodes associated to a given dof.
* This function tells if the solver requires such data
*/
bool AdditionalPhysicalDataIsNeeded() override
{
// The solver needs the dof set and model part (flags ACTIVE/INTERFACE/MASTER/SLAVE)
// to classify dofs into blocks — see ProvideAdditionalData
return true;
}
/**
* @brief Some solvers may require a minimum degree of knowledge of the structure of the matrix.
* @details To make an example when solving a mixed u-p problem, it is important to identify the row associated to v and p. Another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers which require knowledge on the spatial position of the nodes associated to a given dof. This function is the place to eventually provide such data
* @param rA System matrix
* @param rX Solution vector. It's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
void ProvideAdditionalData (
    SparseMatrixType& rA,
    VectorType& rX,
    VectorType& rB,
    DofsArrayType& rDofSet,
    ModelPart& rModelPart
    ) override
{
    // With a block builder and solver (model part NOT flagged TO_SPLIT) the dof set may
    // contain dofs whose equation id lies beyond the system size; those must be skipped.
    // With an elimination builder and solver every dof in the set belongs to the system.
    // The original code duplicated both counting and assignment loops verbatim for the
    // two cases; here a single guard expresses the only difference.
    const bool check_dof_range = rModelPart.IsNot(TO_SPLIT);

    // First pass: count the dofs belonging to each block
    SizeType n_lm_inactive_dofs = 0, n_lm_active_dofs = 0;
    SizeType n_master_dofs = 0;
    SizeType n_slave_inactive_dofs = 0, n_slave_active_dofs = 0;
    SizeType tot_active_dofs = 0;
    for (auto& i_dof : rDofSet) {
        if (check_dof_range && i_dof.EquationId() >= rA.size1())
            continue; // Dof not part of the assembled system
        const NodeType& r_node = rModelPart.GetNode(i_dof.Id());
        tot_active_dofs++;
        if (IsLMDof(i_dof)) {
            if (r_node.Is(ACTIVE))
                n_lm_active_dofs++;
            else
                n_lm_inactive_dofs++;
        } else if (r_node.Is(INTERFACE) && IsDisplacementDof(i_dof)) {
            if (r_node.Is(MASTER)) {
                n_master_dofs++;
            } else if (r_node.Is(SLAVE)) {
                if (r_node.Is(ACTIVE))
                    n_slave_active_dofs++;
                else
                    n_slave_inactive_dofs++;
            }
        }
    }
    KRATOS_ERROR_IF(tot_active_dofs != rA.size1()) << "Total system size does not coincide with the free dof map: " << tot_active_dofs << " vs " << rA.size1() << std::endl;

    // Resize arrays as needed
    if (mMasterIndices.size() != n_master_dofs)
        mMasterIndices.resize (n_master_dofs,false);
    if (mSlaveInactiveIndices.size() != n_slave_inactive_dofs)
        mSlaveInactiveIndices.resize (n_slave_inactive_dofs,false);
    if (mSlaveActiveIndices.size() != n_slave_active_dofs)
        mSlaveActiveIndices.resize (n_slave_active_dofs,false);
    if (mLMInactiveIndices.size() != n_lm_inactive_dofs)
        mLMInactiveIndices.resize (n_lm_inactive_dofs,false);
    if (mLMActiveIndices.size() != n_lm_active_dofs)
        mLMActiveIndices.resize (n_lm_active_dofs,false);
    // Every dof not classified above ends up in the "other" block
    const SizeType n_other_dofs = tot_active_dofs - n_lm_inactive_dofs - n_lm_active_dofs - n_master_dofs - n_slave_inactive_dofs - n_slave_active_dofs;
    if (mOtherIndices.size() != n_other_dofs)
        mOtherIndices.resize (n_other_dofs, false);
    if (mGlobalToLocalIndexing.size() != tot_active_dofs)
        mGlobalToLocalIndexing.resize (tot_active_dofs,false);
    if (mWhichBlockType.size() != tot_active_dofs)
        mWhichBlockType.resize(tot_active_dofs, false);

    // Size check: the formulation requires one active LM dof per active slave dof
    KRATOS_ERROR_IF_NOT(n_lm_active_dofs == n_slave_active_dofs) << "The number of active LM dofs: " << n_lm_active_dofs << " and active slave nodes dofs: " << n_slave_active_dofs << " does not coincide" << std::endl;

    /**
     * Second pass: construct the auxiliary index lists
     * "mXXXIndices[i]" contains the position in the global system of the i-th dof of block XXX
     * "mGlobalToLocalIndexing[i]" contains the position of global dof i inside its local block
     * "mWhichBlockType[i]" tags the block that global dof i belongs to
     */
    SizeType lm_inactive_counter = 0, lm_active_counter = 0;
    SizeType master_counter = 0;
    SizeType slave_inactive_counter = 0, slave_active_counter = 0;
    SizeType other_counter = 0;
    IndexType global_pos = 0;
    for (auto& i_dof : rDofSet) {
        if (check_dof_range && i_dof.EquationId() >= rA.size1())
            continue; // Dof not part of the assembled system (does not advance global_pos)
        const NodeType& r_node = rModelPart.GetNode(i_dof.Id());
        if (IsLMDof(i_dof)) {
            if (r_node.Is(ACTIVE)) {
                mLMActiveIndices[lm_active_counter] = global_pos;
                mGlobalToLocalIndexing[global_pos] = lm_active_counter;
                mWhichBlockType[global_pos] = BlockType::LM_ACTIVE;
                ++lm_active_counter;
            } else {
                mLMInactiveIndices[lm_inactive_counter] = global_pos;
                mGlobalToLocalIndexing[global_pos] = lm_inactive_counter;
                mWhichBlockType[global_pos] = BlockType::LM_INACTIVE;
                ++lm_inactive_counter;
            }
        } else if ( r_node.Is(INTERFACE) && IsDisplacementDof(i_dof)) {
            if (r_node.Is(MASTER)) {
                mMasterIndices[master_counter] = global_pos;
                mGlobalToLocalIndexing[global_pos] = master_counter;
                mWhichBlockType[global_pos] = BlockType::MASTER;
                ++master_counter;
            } else if (r_node.Is(SLAVE)) {
                if (r_node.Is(ACTIVE)) {
                    mSlaveActiveIndices[slave_active_counter] = global_pos;
                    mGlobalToLocalIndexing[global_pos] = slave_active_counter;
                    mWhichBlockType[global_pos] = BlockType::SLAVE_ACTIVE;
                    ++slave_active_counter;
                } else {
                    mSlaveInactiveIndices[slave_inactive_counter] = global_pos;
                    mGlobalToLocalIndexing[global_pos] = slave_inactive_counter;
                    mWhichBlockType[global_pos] = BlockType::SLAVE_INACTIVE;
                    ++slave_inactive_counter;
                }
            } else { // We need to consider always an else to ensure that the system size is consistent
                mOtherIndices[other_counter] = global_pos;
                mGlobalToLocalIndexing[global_pos] = other_counter;
                mWhichBlockType[global_pos] = BlockType::OTHER;
                ++other_counter;
            }
        } else {
            mOtherIndices[other_counter] = global_pos;
            mGlobalToLocalIndexing[global_pos] = other_counter;
            mWhichBlockType[global_pos] = BlockType::OTHER;
            ++other_counter;
        }
        ++global_pos;
    }

    // Consistency checks (debug only). NOTE: the first message previously reported
    // "active slave dofs" while actually checking the master counter — fixed.
    KRATOS_DEBUG_ERROR_IF(master_counter != n_master_dofs) << "The number of master dofs counter : " << master_counter << " does not match the expected: " << n_master_dofs << std::endl;
    KRATOS_DEBUG_ERROR_IF(slave_active_counter != n_slave_active_dofs) << "The number of active slave dofs counter : " << slave_active_counter << " does not match the expected: " << n_slave_active_dofs << std::endl;
    KRATOS_DEBUG_ERROR_IF(slave_inactive_counter != n_slave_inactive_dofs) << "The number of inactive slave dofs counter : " << slave_inactive_counter << " does not match the expected: " << n_slave_inactive_dofs << std::endl;
    KRATOS_DEBUG_ERROR_IF(lm_active_counter != n_lm_active_dofs) << "The number of active LM dofs counter : " << lm_active_counter << " does not match the expected: " << n_lm_active_dofs << std::endl;
    KRATOS_DEBUG_ERROR_IF(lm_inactive_counter != n_lm_inactive_dofs) << "The number of inactive LM dofs counter : " << lm_inactive_counter << " does not match the expected: " << n_lm_inactive_dofs << std::endl;
    KRATOS_DEBUG_ERROR_IF(other_counter != n_other_dofs) << "The number of other dofs counter : " << other_counter << " does not match the expected: " << n_other_dofs << std::endl;
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
// Short human-readable identifier of this solver
return "Mixed displacement LM linear solver";
}
/// Print information about this object.
void PrintInfo (std::ostream& rOStream) const override
{
// Stream the same identifier returned by Info()
rOStream << "Mixed displacement LM linear solver";
}
/// Print object's data.
void PrintData (std::ostream& rOStream) const override
{
// Intentionally empty: this solver exposes no extra printable data
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This function generates the subblocks of matrix A
* @details as A = ( KNN KNM KNSI KNSA 0 0 ) u
* ( KMN KMM KMSI KMSA -MI^T -MA^T ) u_master
* ( KSIN KSIM KSISI KSISA DII^T DIA^T ) u_slave_inactive
* ( KSAN KSAM KSASI KSASA DAI^T DAA^T ) u_slave_active
* ( 0 0 0 0 ALMI 0 ) LMInactive
* ( 0 KLMAM KLMASI KLMASA 0 KLMALMA ) LMActive
* We will call as A = ( KNN KNM KNSI KNSA 0 0 ) u
* ( KMN KMM KMSI KMSA KMLMI KMLMA ) u_master
* ( KSIN KSIM KSISI KSISA KSILMI KSILMA ) u_slave_inactive
* ( KSAN KSAM KSASI KSASA KSALMI KSALMA ) u_slave_active
* ( 0 0 0 0 KLMILMI 0 ) LMInactive
* ( 0 KLMAM KLMASI KLMASA 0 KLMALMA ) LMActive
* Subblocks are allocated or not depending on the value of "NeedAllocation"
* @param rA System matrix
* @param rX Solution vector. it's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector.
*/
void FillBlockMatrices (
const bool NeedAllocation,
SparseMatrixType& rA,
VectorType& rX,
VectorType& rB
)
{
KRATOS_TRY
// Auxiliar sizes
const SizeType other_dof_size = mOtherIndices.size();
const SizeType master_size = mMasterIndices.size();
const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
const SizeType slave_active_size = mSlaveActiveIndices.size();
const SizeType lm_active_size = mLMActiveIndices.size();
const SizeType lm_inactive_size = mLMInactiveIndices.size();
if (NeedAllocation)
AllocateBlocks();
// Get access to A data
const IndexType* index1 = rA.index1_data().begin();
const IndexType* index2 = rA.index2_data().begin();
const double* values = rA.value_data().begin();
// Allocate the auxiliar blocks by push_back
SparseMatrixType KMLMA(master_size, lm_active_size); /// The master-active LM block (this is the big block of M)
SparseMatrixType KLMALMA(lm_active_size, lm_active_size); /// The active LM-active LM block
SparseMatrixType KSALMA(slave_active_size, lm_active_size); /// The active slave-active LM block (this is the big block of D, diagonal)
SparseMatrixType KLMILMI(lm_inactive_size, lm_inactive_size); /// The inactive LM- inactive LM block (diagonal)
IndexType* KMLMA_ptr = new IndexType[master_size + 1];
IndexType* mKSAN_ptr = new IndexType[slave_active_size + 1];
IndexType* mKSAM_ptr = new IndexType[slave_active_size + 1];
IndexType* mKSASI_ptr = new IndexType[slave_active_size + 1];
IndexType* mKSASA_ptr = new IndexType[slave_active_size + 1];
IndexType* KSALMA_ptr = new IndexType[slave_active_size + 1];
IndexType* KLMILMI_ptr = new IndexType[lm_inactive_size + 1];
IndexType* KLMALMA_ptr = new IndexType[lm_active_size + 1];
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(master_size + 1); i++)
KMLMA_ptr[i] = 0;
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(slave_active_size + 1); i++) {
mKSAN_ptr[i] = 0;
mKSAM_ptr[i] = 0;
mKSASI_ptr[i] = 0;
mKSASA_ptr[i] = 0;
KSALMA_ptr[i] = 0;
}
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(lm_inactive_size + 1); i++)
KLMILMI_ptr[i] = 0;
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(lm_active_size + 1); i++)
KLMALMA_ptr[i] = 0;
#pragma omp parallel
{
// We iterate over original matrix
#pragma omp for
for (int i=0; i<static_cast<int>(rA.size1()); i++) {
const IndexType row_begin = index1[i];
const IndexType row_end = index1[i+1];
const IndexType local_row_id = mGlobalToLocalIndexing[i];
IndexType KMLMA_cols = 0;
IndexType mKSAN_cols = 0;
IndexType mKSAM_cols = 0;
IndexType mKSASI_cols = 0;
IndexType mKSASA_cols = 0;
IndexType KSALMA_cols = 0;
IndexType KLMILMI_cols = 0;
IndexType KLMALMA_cols = 0;
if ( mWhichBlockType[i] == BlockType::MASTER) { // KMLMA
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KMLMA block
++KMLMA_cols;
}
}
KRATOS_DEBUG_ERROR_IF(local_row_id > master_size) << "MASTER:: Local row ID: " << local_row_id <<" is greater than the number of rows " << master_size << std::endl;
KMLMA_ptr[local_row_id + 1] = KMLMA_cols;
} else if ( mWhichBlockType[i] == BlockType::SLAVE_ACTIVE) { //either KSAN or KSAM or KSASA or KSASA or KSALM
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
if (mWhichBlockType[col_index] == BlockType::OTHER) { // KSAN block
++mKSAN_cols;
} else if (mWhichBlockType[col_index] == BlockType::MASTER) { // KSAM block
++mKSAM_cols;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) { // KSASI block
++mKSASI_cols;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) { // KSASA block
++mKSASA_cols;
} else if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KSALMA block (diagonal)
++KSALMA_cols;
}
}
KRATOS_DEBUG_ERROR_IF(local_row_id > slave_active_size) << "SLAVE_ACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << slave_active_size << std::endl;
mKSAN_ptr[local_row_id + 1] = mKSAN_cols;
mKSAM_ptr[local_row_id + 1] = mKSAM_cols;
mKSASI_ptr[local_row_id + 1] = mKSASI_cols;
mKSASA_ptr[local_row_id + 1] = mKSASA_cols;
KSALMA_ptr[local_row_id + 1] = KSALMA_cols;
} else if ( mWhichBlockType[i] == BlockType::LM_INACTIVE) { // KLMILMI
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
if (mWhichBlockType[col_index] == BlockType::LM_INACTIVE) { // KLMILMI block (diagonal)
++KLMILMI_cols;
}
}
KRATOS_DEBUG_ERROR_IF(local_row_id > lm_inactive_size) << "LM_INACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << lm_inactive_size << std::endl;
KLMILMI_ptr[local_row_id + 1] = KLMILMI_cols;
} else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { // KLMALMA
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
if (mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KLMALMA block
++KLMALMA_cols;
}
}
KRATOS_DEBUG_ERROR_IF(local_row_id > lm_active_size) << "LM_ACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << lm_active_size << std::endl;
KLMALMA_ptr[local_row_id + 1] = KLMALMA_cols;
}
}
}
// We initialize the blocks sparse matrix
std::partial_sum(KMLMA_ptr, KMLMA_ptr + master_size + 1, KMLMA_ptr);
const std::size_t KMLMA_nonzero_values = KMLMA_ptr[master_size];
IndexType* aux_index2_KMLMA= new IndexType[KMLMA_nonzero_values];
double* aux_val_KMLMA= new double[KMLMA_nonzero_values];
std::partial_sum(mKSAN_ptr, mKSAN_ptr + slave_active_size + 1, mKSAN_ptr);
const std::size_t mKSAN_nonzero_values = mKSAN_ptr[slave_active_size];
IndexType* aux_index2_mKSAN= new IndexType[mKSAN_nonzero_values];
double* aux_val_mKSAN= new double[mKSAN_nonzero_values];
std::partial_sum(mKSAM_ptr, mKSAM_ptr + slave_active_size + 1, mKSAM_ptr);
const std::size_t mKSAM_nonzero_values = mKSAM_ptr[slave_active_size];
IndexType* aux_index2_mKSAM= new IndexType[mKSAM_nonzero_values];
double* aux_val_mKSAM= new double[mKSAM_nonzero_values];
std::partial_sum(mKSASI_ptr, mKSASI_ptr + slave_active_size + 1, mKSASI_ptr);
const std::size_t mKSASI_nonzero_values = mKSASI_ptr[slave_active_size];
IndexType* aux_index2_mKSASI= new IndexType[mKSASI_nonzero_values];
double* aux_val_mKSASI= new double[mKSASI_nonzero_values];
std::partial_sum(mKSASA_ptr, mKSASA_ptr + slave_active_size + 1, mKSASA_ptr);
const std::size_t mKSASA_nonzero_values = mKSASA_ptr[slave_active_size];
IndexType* aux_index2_mKSASA= new IndexType[mKSASA_nonzero_values];
double* aux_val_mKSASA = new double[mKSASA_nonzero_values];
std::partial_sum(KSALMA_ptr, KSALMA_ptr + slave_active_size + 1, KSALMA_ptr);
const std::size_t KSALMA_nonzero_values = KSALMA_ptr[slave_active_size];
IndexType* aux_index2_KSALMA= new IndexType[KSALMA_nonzero_values];
double* aux_val_KSALMA = new double[KSALMA_nonzero_values];
std::partial_sum(KLMILMI_ptr, KLMILMI_ptr + lm_inactive_size + 1, KLMILMI_ptr);
const std::size_t KLMILMI_nonzero_values = KLMILMI_ptr[lm_inactive_size];
IndexType* aux_index2_KLMILMI= new IndexType[KLMILMI_nonzero_values];
double* aux_val_KLMILMI = new double[KLMILMI_nonzero_values];
std::partial_sum(KLMALMA_ptr, KLMALMA_ptr + lm_active_size + 1, KLMALMA_ptr);
const std::size_t KLMALMA_nonzero_values = KLMALMA_ptr[lm_active_size];
IndexType* aux_index2_KLMALMA = new IndexType[KLMALMA_nonzero_values];
double* aux_val_KLMALMA = new double[KLMALMA_nonzero_values];
#pragma omp parallel
{
// We iterate over original matrix
#pragma omp for
for (int i=0; i<static_cast<int>(rA.size1()); i++) {
const IndexType row_begin = index1[i];
const IndexType row_end = index1[i+1];
const IndexType local_row_id = mGlobalToLocalIndexing[i];
if ( mWhichBlockType[i] == BlockType::MASTER) { // KMLMA
IndexType KMLMA_row_beg = KMLMA_ptr[local_row_id];
IndexType KMLMA_row_end = KMLMA_row_beg;
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KMLMA block
const double value = values[j];
const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
aux_index2_KMLMA[KMLMA_row_end] = local_col_id;
aux_val_KMLMA[KMLMA_row_end] = value;
++KMLMA_row_end;
}
}
} else if ( mWhichBlockType[i] == BlockType::SLAVE_ACTIVE) { //either KSAN or KSAM or KSASA or KSASA or KSALM
IndexType mKSAN_row_beg = mKSAN_ptr[local_row_id];
IndexType mKSAN_row_end = mKSAN_row_beg;
IndexType mKSAM_row_beg = mKSAM_ptr[local_row_id];
IndexType mKSAM_row_end = mKSAM_row_beg;
IndexType mKSASI_row_beg = mKSASI_ptr[local_row_id];
IndexType mKSASI_row_end = mKSASI_row_beg;
IndexType mKSASA_row_beg = mKSASA_ptr[local_row_id];
IndexType mKSASA_row_end = mKSASA_row_beg;
IndexType KSALMA_row_beg = KSALMA_ptr[local_row_id];
IndexType KSALMA_row_end = KSALMA_row_beg;
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
const double value = values[j];
const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
if (mWhichBlockType[col_index] == BlockType::OTHER) { // KSAN block
aux_index2_mKSAN[mKSAN_row_end] = local_col_id;
aux_val_mKSAN[mKSAN_row_end] = value;
++mKSAN_row_end;
} else if (mWhichBlockType[col_index] == BlockType::MASTER) { // KSAM block
aux_index2_mKSAM[mKSAM_row_end] = local_col_id;
aux_val_mKSAM[mKSAM_row_end] = value;
++mKSAM_row_end;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) { // KSASI block
aux_index2_mKSASI[mKSASI_row_end] = local_col_id;
aux_val_mKSASI[mKSASI_row_end] = value;
++mKSASI_row_end;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) { // KSASA block
aux_index2_mKSASA[mKSASA_row_end] = local_col_id;
aux_val_mKSASA[mKSASA_row_end] = value;
++mKSASA_row_end;
} else if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KSALMA block (diagonal)
aux_index2_KSALMA[KSALMA_row_end] = local_col_id;
aux_val_KSALMA[KSALMA_row_end] = value;
++KSALMA_row_end;
}
}
} else if ( mWhichBlockType[i] == BlockType::LM_INACTIVE) { // KLMILMI
IndexType KLMILMI_row_beg = KLMILMI_ptr[local_row_id];
IndexType KLMILMI_row_end = KLMILMI_row_beg;
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
if (mWhichBlockType[col_index] == BlockType::LM_INACTIVE) { // KLMILMI block (diagonal)
const double value = values[j];
const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
aux_index2_KLMILMI[KLMILMI_row_end] = local_col_id;
aux_val_KLMILMI[KLMILMI_row_end] = value;
++KLMILMI_row_end;
}
}
} else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { // KLMALMA
IndexType KLMALMA_row_beg = KLMALMA_ptr[local_row_id];
IndexType KLMALMA_row_end = KLMALMA_row_beg;
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = index2[j];
if (mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KLMALMA block
const double value = values[j];
const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
aux_index2_KLMALMA[KLMALMA_row_end] = local_col_id;
aux_val_KLMALMA[KLMALMA_row_end] = value;
++KLMALMA_row_end;
}
}
}
}
}
CreateMatrix(KMLMA, master_size, lm_active_size, KMLMA_ptr, aux_index2_KMLMA, aux_val_KMLMA);
CreateMatrix(mKSAN, slave_active_size, other_dof_size, mKSAN_ptr, aux_index2_mKSAN, aux_val_mKSAN);
CreateMatrix(mKSAM, slave_active_size, master_size, mKSAM_ptr, aux_index2_mKSAM, aux_val_mKSAM);
CreateMatrix(mKSASI, slave_active_size, slave_inactive_size, mKSASI_ptr, aux_index2_mKSASI, aux_val_mKSASI);
CreateMatrix(mKSASA, slave_active_size, slave_active_size, mKSASA_ptr, aux_index2_mKSASA, aux_val_mKSASA);
CreateMatrix(KSALMA, slave_active_size, lm_active_size, KSALMA_ptr, aux_index2_KSALMA, aux_val_KSALMA);
CreateMatrix(KLMILMI, lm_inactive_size, lm_inactive_size, KLMILMI_ptr, aux_index2_KLMILMI, aux_val_KLMILMI);
CreateMatrix(KLMALMA, lm_active_size, lm_active_size, KLMALMA_ptr, aux_index2_KLMALMA, aux_val_KLMALMA);
// We compute directly the inverse of the KSALMA matrix
// KSALMA it is supposed to be a diagonal matrix (in fact it is the key point of this formulation)
// (NOTE: technically it is not a stiffness matrix, we give that name)
if (lm_active_size > 0) {
ComputeDiagonalByLumping(KSALMA, mKLMAModified, ZeroTolerance);
}
// We compute directly the inverse of the KLMILMI matrix
// KLMILMI it is supposed to be a diagonal matrix (in fact it is the key point of this formulation)
// (NOTE: technically it is not a stiffness matrix, we give that name)
if (lm_inactive_size > 0) {
ComputeDiagonalByLumping(KLMILMI, mKLMIModified, ZeroTolerance);
}
// Compute the P and C operators
if (slave_active_size > 0) {
SparseMatrixMultiplicationUtility::MatrixMultiplication(KMLMA, mKLMAModified, mPOperator);
SparseMatrixMultiplicationUtility::MatrixMultiplication(KLMALMA, mKLMAModified, mCOperator);
}
// We proceed with the auxiliar products for the master blocks
SparseMatrixType master_auxKSAN(master_size, other_dof_size);
SparseMatrixType master_auxKSAM(master_size, master_size);
SparseMatrixType master_auxKSASI(master_size, slave_inactive_size);
SparseMatrixType master_auxKSASA(master_size, slave_active_size);
if (slave_active_size > 0) {
SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSAN, master_auxKSAN);
SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSAM, master_auxKSAM);
if (slave_inactive_size > 0)
SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSASI, master_auxKSASI);
SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSASA, master_auxKSASA);
}
// We proceed with the auxiliar products for the active slave blocks
SparseMatrixType aslave_auxKSAN(slave_active_size, other_dof_size);
SparseMatrixType aslave_auxKSAM(slave_active_size, master_size);
SparseMatrixType aslave_auxKSASI(slave_active_size, slave_inactive_size);
SparseMatrixType aslave_auxKSASA(slave_active_size, slave_active_size);
if (slave_active_size > 0) {
SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSAN, aslave_auxKSAN);
SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSAM, aslave_auxKSAM);
if (slave_inactive_size > 0)
SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSASI, aslave_auxKSASI);
SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSASA, aslave_auxKSASA);
}
// Auxiliar indexes
const SizeType other_dof_initial_index = 0;
const SizeType master_dof_initial_index = other_dof_size;
const SizeType slave_inactive_dof_initial_index = master_dof_initial_index + master_size;
const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size;
// The auxiliar index structure
const SizeType nrows = mKDispModified.size1();
const SizeType ncols = mKDispModified.size2();
IndexType* K_disp_modified_ptr_aux1 = new IndexType[nrows + 1];
K_disp_modified_ptr_aux1[0] = 0;
#pragma omp parallel
{
#pragma omp for
for (int i=0; i<static_cast<int>(rA.size1()); i++) {
if ( mWhichBlockType[i] == BlockType::OTHER) { //either KNN or KNM or KNSI or KNSA
ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, other_dof_initial_index, K_disp_modified_ptr_aux1);
} else if ( mWhichBlockType[i] == BlockType::MASTER) { //either KMN or KMM or KMSI or KMLM
ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, master_dof_initial_index, K_disp_modified_ptr_aux1);
} else if ( mWhichBlockType[i] == BlockType::SLAVE_INACTIVE) { //either KSIN or KSIM or KSISI or KSISA
ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, slave_inactive_dof_initial_index, K_disp_modified_ptr_aux1);
} else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { //either KLMAM or KLMASI or KLMASA
ComputeNonZeroColumnsPartialDispDoFs( index1, index2, values, i, assembling_slave_dof_initial_index, K_disp_modified_ptr_aux1);
}
}
}
// We initialize the final sparse matrix
std::partial_sum(K_disp_modified_ptr_aux1, K_disp_modified_ptr_aux1 + nrows + 1, K_disp_modified_ptr_aux1);
const SizeType nonzero_values_aux1 = K_disp_modified_ptr_aux1[nrows];
IndexType* aux_index2_K_disp_modified_aux1 = new IndexType[nonzero_values_aux1];
double* aux_val_K_disp_modified_aux1 = new double[nonzero_values_aux1];
#pragma omp parallel
{
#pragma omp for
for (int i=0; i<static_cast<int>(rA.size1()); i++) {
if ( mWhichBlockType[i] == BlockType::OTHER) { //either KNN or KNM or KNSI or KNSA
ComputeAuxiliarValuesDispDoFs( index1, index2, values, i, other_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
} else if ( mWhichBlockType[i] == BlockType::MASTER) { //either KMN or KMM or KMSI or KMLM
ComputeAuxiliarValuesDispDoFs( index1, index2, values, i, master_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
} else if ( mWhichBlockType[i] == BlockType::SLAVE_INACTIVE) { //either KSIN or KSIM or KSISI or KSISA
ComputeAuxiliarValuesDispDoFs( index1, index2, values, i, slave_inactive_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
} else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { //either KLMAM or KLMASI or KLMASA
ComputeAuxiliarValuesPartialDispDoFs( index1, index2, values, i, assembling_slave_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
}
}
}
// Create the first auxiliar matrix
CreateMatrix(mKDispModified, nrows, ncols, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
// Now we create the second matrix block to sum
IndexType* K_disp_modified_ptr_aux2 = new IndexType[nrows + 1];
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(nrows + 1); i++)
K_disp_modified_ptr_aux2[i] = 0;
#pragma omp parallel
{
#pragma omp for
for (int i=0; i<static_cast<int>(master_size); i++) {
IndexType K_disp_modified_cols_aux2 = 0;
// Get access to master_auxKSAN data
if (master_auxKSAN.nnz() > 0 && other_dof_size > 0) {
ComputeNonZeroBlocks(master_auxKSAN, i, K_disp_modified_cols_aux2);
}
// Get access to master_auxKSAM data
if (master_auxKSAM.nnz() > 0) {
ComputeNonZeroBlocks(master_auxKSAM, i, K_disp_modified_cols_aux2);
}
// Get access to master_auxKSASI data
if (master_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
ComputeNonZeroBlocks(master_auxKSASI, i, K_disp_modified_cols_aux2);
}
// Get access to master_auxKSASA data
if (master_auxKSASA.nnz() > 0 && slave_active_size > 0) {
ComputeNonZeroBlocks(master_auxKSASA, i, K_disp_modified_cols_aux2);
}
K_disp_modified_ptr_aux2[master_dof_initial_index + i + 1] = K_disp_modified_cols_aux2;
}
#pragma omp for
for (int i=0; i<static_cast<int>(slave_active_size); i++) {
IndexType K_disp_modified_cols_aux2 = 0;
// Get access to aslave_auxKSAN data
if (aslave_auxKSAN.nnz() > 0 && other_dof_size > 0) {
ComputeNonZeroBlocks(aslave_auxKSAN, i, K_disp_modified_cols_aux2);
}
// Get access to aslave_auxKSAM data
if (aslave_auxKSAM.nnz() > 0 && master_size > 0) {
ComputeNonZeroBlocks(aslave_auxKSAM, i, K_disp_modified_cols_aux2);
}
// Get access to aslave_auxKSASI data
if (aslave_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
ComputeNonZeroBlocks(aslave_auxKSASI, i, K_disp_modified_cols_aux2);
}
// Get access to aslave_auxKSASA data
if (aslave_auxKSASA.nnz() > 0) {
ComputeNonZeroBlocks(aslave_auxKSASA, i, K_disp_modified_cols_aux2);
}
K_disp_modified_ptr_aux2[assembling_slave_dof_initial_index + i + 1] = K_disp_modified_cols_aux2;
}
}
// We initialize the final sparse matrix
std::partial_sum(K_disp_modified_ptr_aux2, K_disp_modified_ptr_aux2 + nrows + 1, K_disp_modified_ptr_aux2);
const SizeType nonzero_values_aux2 = K_disp_modified_ptr_aux2[nrows];
IndexType* aux_index2_K_disp_modified_aux2 = new IndexType[nonzero_values_aux2];
double* aux_val_K_disp_modified_aux2 = new double[nonzero_values_aux2];
#pragma omp parallel
{
#pragma omp for
for (int i=0; i<static_cast<int>(master_size); i++) {
const IndexType row_beg = K_disp_modified_ptr_aux2[master_dof_initial_index + i];
IndexType row_end = row_beg;
// Get access to master_auxKSAN data
if (master_auxKSAN.nnz() > 0 && other_dof_size > 0) {
ComputeAuxiliarValuesBlocks(master_auxKSAN, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, other_dof_initial_index);
}
// Get access to master_auxKSAM data
if (master_auxKSAM.nnz() > 0) {
ComputeAuxiliarValuesBlocks(master_auxKSAM, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, master_dof_initial_index);
}
// Get access to master_auxKSASI data
if (master_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
ComputeAuxiliarValuesBlocks(master_auxKSASI, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, slave_inactive_dof_initial_index);
}
// Get access to master_auxKSASA data
if (master_auxKSASA.nnz() > 0 && slave_active_size > 0) {
ComputeAuxiliarValuesBlocks(master_auxKSASA, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, assembling_slave_dof_initial_index);
}
}
#pragma omp for
for (int i=0; i<static_cast<int>(slave_active_size); i++) {
const IndexType row_beg = K_disp_modified_ptr_aux2[assembling_slave_dof_initial_index + i];
IndexType row_end = row_beg;
// Get access to aslave_auxKSAN data
if (aslave_auxKSAN.nnz() > 0 && other_dof_size > 0) {
ComputeAuxiliarValuesBlocks(aslave_auxKSAN, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, other_dof_initial_index);
}
// Get access to aslave_auxKSAM data
if (aslave_auxKSAM.nnz() > 0 && master_size > 0) {
ComputeAuxiliarValuesBlocks(aslave_auxKSAM, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, master_dof_initial_index);
}
// Get access to aslave_auxKSASI data
if (aslave_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
ComputeAuxiliarValuesBlocks(aslave_auxKSASI, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, slave_inactive_dof_initial_index);
}
// Get access to aslave_auxKSASA data
if (aslave_auxKSASA.nnz() > 0) {
ComputeAuxiliarValuesBlocks(aslave_auxKSASA, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, assembling_slave_dof_initial_index);
}
}
}
// Create the second auxiliar matrix
SparseMatrixType K_disp_modified_aux2(nrows, ncols);
CreateMatrix(K_disp_modified_aux2, nrows, ncols, K_disp_modified_ptr_aux2, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2);
// We sum the auxiliar matrices
SparseMatrixMultiplicationUtility::MatrixAdd<SparseMatrixType, SparseMatrixType>(mKDispModified, K_disp_modified_aux2, 1.0);
// Finally we ensure that the matrix is structurally symmetric
EnsureStructuralSymmetryMatrix(mKDispModified);
#ifdef KRATOS_DEBUG
CheckMatrix(mKDispModified);
#endif
// // DEBUG
// LOG_MATRIX_PRETTY(rA)
// LOG_MATRIX_PRETTY(mKDispModified)
KRATOS_CATCH ("")
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
LinearSolverPointerType mpSolverDispBlock; /// The pointer to the displacement linear solver
bool mBlocksAreAllocated; /// The flag that indicates if the blocks are allocated
bool mIsInitialized; /// The flag that indicates if the solver has been initialized
IndexVectorType mMasterIndices; /// The vector storing the indices of the master nodes in contact
IndexVectorType mSlaveInactiveIndices; /// The vector storing the indices of the slave nodes in contact (Inactive)
IndexVectorType mSlaveActiveIndices; /// The vector storing the indices of the slave nodes in contact (Active)
IndexVectorType mLMInactiveIndices; /// The vector storing the indices of the LM (Inactive)
IndexVectorType mLMActiveIndices; /// The vector storing the indices of the LM (Active)
IndexVectorType mOtherIndices; /// The vector containing the indices for other DoF
IndexVectorType mGlobalToLocalIndexing; /// This vector stores the correspondence between the local and global indexing
BlockTypeVectorType mWhichBlockType; /// This vector stores the block type each global DoF belongs to
SparseMatrixType mKDispModified; /// The modified displacement block
SparseMatrixType mKLMAModified; /// The modified active LM block (inverted diagonal)
SparseMatrixType mKLMIModified; /// The modified inactive LM block (inverted diagonal)
SparseMatrixType mKSAN; /// The slave active-displacement block
SparseMatrixType mKSAM; /// The active slave-master block
SparseMatrixType mKSASI; /// The active slave-inactive slave block
SparseMatrixType mKSASA; /// The inactive slave-active slave block
SparseMatrixType mPOperator; /// The operator used for the master blocks
SparseMatrixType mCOperator; /// The operator used for the active slave block
VectorType mResidualLMActive; /// The residual of the active lagrange multipliers
VectorType mResidualLMInactive; /// The residual of the inactive lagrange multipliers
VectorType mResidualDisp; /// The residual of the rest of displacements
VectorType mLMActive; /// The solution of the active lagrange multipliers
VectorType mLMInactive; /// The solution of the inactive lagrange multipliers
VectorType mDisp; /// The solution of the rest of displacements
IndexType mEchoLevel = 0; /// The echo level of the solver
IndexType mFileCreated = 0; /// The index used to identify the file created
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief This method is mean to avoid code duplication when computing the non zero terms in the Aux1 matrix
* @param Index1 The indexes of nonzero rows
* @param Index2 The indexes of nonzero columns
* @param Values The array containing the values of the matrix
* @param CurrentRow The current row computed
* @param InitialIndex The index corresponding to the current row in the global contribution
* @param Ptr The nonzero terms of each column
*/
inline void ComputeNonZeroColumnsDispDoFs(
const IndexType* Index1,
const IndexType* Index2,
const double* Values,
const int CurrentRow,
const IndexType InitialIndex,
IndexType* Ptr
)
{
const IndexType row_begin = Index1[CurrentRow];
const IndexType row_end = Index1[CurrentRow + 1];
IndexType cols = 0;
const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = Index2[j];
if (mWhichBlockType[col_index] == BlockType::OTHER) {
++cols;
} else if (mWhichBlockType[col_index] == BlockType::MASTER) {
++cols;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
++cols;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
++cols;
}
}
Ptr[local_row_id + 1] = cols;
}
/**
* @brief This method is mean to avoid code duplication when computing the non zero terms in the Aux1 matrix
* @details The same as the previous one but not taking into account the contribution of the other dofs
* @param Index1 The indexes of nonzero rows
* @param Index2 The indexes of nonzero columns
* @param Values The array containing the values of the matrix
* @param CurrentRow The current row computed
* @param InitialIndex The index corresponding to the current row in the global contribution
* @param Ptr The nonzero terms of each column
*/
inline void ComputeNonZeroColumnsPartialDispDoFs(
const IndexType* Index1,
const IndexType* Index2,
const double* Values,
const int CurrentRow,
const IndexType InitialIndex,
IndexType* Ptr
)
{
const IndexType row_begin = Index1[CurrentRow];
const IndexType row_end = Index1[CurrentRow + 1];
IndexType cols = 0;
const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType col_index = Index2[j];
if (mWhichBlockType[col_index] == BlockType::MASTER) {
++cols;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
++cols;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
++cols;
}
}
Ptr[local_row_id + 1] = cols;
}
/**
* @brief This method is mean to avoid code duplication when evaluate the terms of the Aux1 matrix
* @param Index1 The indexes of nonzero rows
* @param Index2 The indexes of nonzero columns
* @param Values The array containing the values of the matrix
* @param CurrentRow The current row computed
* @param InitialIndex The index corresponding to the current row in the global contribution
* @param Ptr The nonzero terms of each column
* @param AuxIndex2 The indexes of the non zero columns
* @param AuxVals The values of the final matrix
*/
inline void ComputeAuxiliarValuesDispDoFs(
const IndexType* Index1,
const IndexType* Index2,
const double* Values,
const int CurrentRow,
const IndexType InitialIndex,
IndexType* Ptr,
IndexType* AuxIndex2,
double* AuxVals
)
{
// Auxiliar sizes
const SizeType other_dof_size = mOtherIndices.size();
const SizeType master_size = mMasterIndices.size();
const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
// Auxiliar indexes
const SizeType other_dof_initial_index = 0;
const SizeType master_dof_initial_index = other_dof_size;
const SizeType slave_inactive_dof_initial_index = master_dof_initial_index + master_size;
const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size;
// Some indexes
const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
const IndexType row_begin_A = Index1[CurrentRow];
const IndexType row_end_A = Index1[CurrentRow + 1];
const IndexType row_beg = Ptr[local_row_id];
IndexType row_end = row_beg;
for (IndexType j=row_begin_A; j<row_end_A; j++) {
const IndexType col_index = Index2[j];
const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
const double value = Values[j];
if (mWhichBlockType[col_index] == BlockType::OTHER) {
AuxIndex2[row_end] = local_col_id + other_dof_initial_index;
AuxVals[row_end] = value;
++row_end;
} else if (mWhichBlockType[col_index] == BlockType::MASTER) {
AuxIndex2[row_end] = local_col_id + master_dof_initial_index;
AuxVals[row_end] = value;
++row_end;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
AuxIndex2[row_end] = local_col_id + slave_inactive_dof_initial_index;
AuxVals[row_end] = value;
++row_end;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
AuxIndex2[row_end] = local_col_id + assembling_slave_dof_initial_index;
AuxVals[row_end] = value;
++row_end;
}
}
}
/**
* @brief This method is mean to avoid code duplication when evaluate the terms of the Aux1 matrix
* @details The same as the previous one but not taking into account the contribution of the other dofs
* @param Index1 The indexes of nonzero rows
* @param Index2 The indexes of nonzero columns
* @param Values The array containing the values of the matrix
* @param CurrentRow The current row computed
* @param InitialIndex The index corresponding to the current row in the global contribution
* @param Ptr The nonzero terms of each column
* @param AuxIndex2 The indexes of the non zero columns
* @param AuxVals The values of the final matrix
*/
inline void ComputeAuxiliarValuesPartialDispDoFs(
const IndexType* Index1,
const IndexType* Index2,
const double* Values,
const int CurrentRow,
const IndexType InitialIndex,
IndexType* Ptr,
IndexType* AuxIndex2,
double* AuxVals
)
{
// Auxiliar sizes
const SizeType other_dof_size = mOtherIndices.size();
const SizeType master_size = mMasterIndices.size();
const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
// Auxiliar indexes
const SizeType master_dof_initial_index = other_dof_size;
const SizeType slave_inactive_dof_initial_index = master_dof_initial_index + master_size;
const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size;
// Some indexes
const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
const IndexType row_begin_A = Index1[CurrentRow];
const IndexType row_end_A = Index1[CurrentRow + 1];
const IndexType row_beg = Ptr[local_row_id];
IndexType row_end = row_beg;
for (IndexType j=row_begin_A; j<row_end_A; j++) {
const IndexType col_index = Index2[j];
const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
const double value = Values[j];
if (mWhichBlockType[col_index] == BlockType::MASTER) {
AuxIndex2[row_end] = local_col_id + master_dof_initial_index;
AuxVals[row_end] = value;
++row_end;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
AuxIndex2[row_end] = local_col_id + slave_inactive_dof_initial_index;
AuxVals[row_end] = value;
++row_end;
} else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
AuxIndex2[row_end] = local_col_id + assembling_slave_dof_initial_index;
AuxVals[row_end] = value;
++row_end;
}
}
}
/**
* @brief This is a method to check the block containing nonzero values
* @param AuxK The auxiliar block
* @param CurrentRow The current row computed
* @param KDispModifiedColsAux2 The nonzero rows array
*/
inline void ComputeNonZeroBlocks(
const SparseMatrixType& AuxK,
const int CurrentRow,
IndexType& KDispModifiedColsAux2
)
{
// Get access to aux_K data
const IndexType* aux_K_index1 = AuxK.index1_data().begin();
const IndexType row_begin = aux_K_index1[CurrentRow];
const IndexType row_end = aux_K_index1[CurrentRow + 1];
for (IndexType j=row_begin; j<row_end; j++) {
++KDispModifiedColsAux2;
}
}
/**
* @brief This is a method to compute the contribution of the auxiliar blocks
* @param AuxK The auxiliar block
* @param AuxIndex2 The indexes of the non zero columns
* @param AuxVals The values of the final matrix
* @param CurrentRow The current row computed
* @param RowEnd The last column computed
* @param InitialIndexColumn The initial column index of the auxiliar block in the final matrix
*/
inline void ComputeAuxiliarValuesBlocks(
const SparseMatrixType& AuxK,
IndexType* AuxIndex2,
double* AuxVals,
const int CurrentRow,
IndexType& RowEnd,
const SizeType InitialIndexColumn
)
{
// Get access to aux_K data
const double* aux_values = AuxK.value_data().begin();
const IndexType* aux_K_index1 = AuxK.index1_data().begin();
const IndexType* aux_K_index2 = AuxK.index2_data().begin();
const IndexType aux_K_row_begin = aux_K_index1[CurrentRow];
const IndexType aux_K_row_end = aux_K_index1[CurrentRow + 1];
for (IndexType j=aux_K_row_begin; j<aux_K_row_end; j++) {
const IndexType col_index = InitialIndexColumn + aux_K_index2[j];
AuxIndex2[RowEnd] = col_index;
AuxVals[RowEnd] = -aux_values[j];
++RowEnd;
}
}
/**
* @brief It allocates all the blocks and operators
*/
inline void AllocateBlocks()
{
    // Clear the blocks and operators left over from any previous solve
    mKDispModified.clear();      /// The modified displacement block
    mKLMAModified.clear();       /// The modified active LM block (diagonal)
    mKLMIModified.clear();       /// The modified inactive LM block (diagonal)
    mKSAN.clear();               /// The slave active-displacement block
    mKSAM.clear();               /// The active slave-master block
    mKSASI.clear();              /// The active slave-inactive slave block
    mKSASA.clear();              /// The active slave-slave active block
    mPOperator.clear();          /// The operator used for the master blocks
    mCOperator.clear();          /// The operator used for the active slave block
    mResidualLMActive.clear();   /// The residual corresponding to the active LM
    mResidualLMInactive.clear(); /// The residual corresponding to the inactive LM
    mResidualDisp.clear();       /// The residual of the displacements
    mLMActive.clear();           /// The solution of the active LM
    mLMInactive.clear();         /// The solution of the inactive LM
    mDisp.clear();               /// The solution of the displacement
    // Auxiliar sizes of each DoF family
    const SizeType other_dof_size = mOtherIndices.size();
    const SizeType master_size = mMasterIndices.size();
    const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
    const SizeType slave_active_size = mSlaveActiveIndices.size();
    const SizeType lm_active_size = mLMActiveIndices.size();
    const SizeType lm_inactive_size = mLMInactiveIndices.size();
    const SizeType total_size = other_dof_size + master_size + slave_inactive_size + slave_active_size;
    // We do the allocation (preserve flag false: old contents are not needed)
    mKDispModified.resize(total_size, total_size, false);             /// The modified displacement block
    mKLMAModified.resize(lm_active_size, lm_active_size, false);      /// The modified active LM block (diagonal)
    mKLMAModified.reserve(lm_active_size);                            // Diagonal matrix: one entry per row
    mKLMIModified.resize(lm_inactive_size, lm_inactive_size, false);  /// The modified inactive LM block (diagonal)
    mKLMIModified.reserve(lm_inactive_size);                          // Diagonal matrix: one entry per row
    mKSAN.resize(slave_active_size, other_dof_size, false);           /// The slave active-displacement block
    mKSAM.resize(slave_active_size, master_size, false);              /// The active slave-master block
    mKSASI.resize(slave_active_size, slave_inactive_size, false);     /// The active slave-inactive slave block
    mKSASA.resize(slave_active_size, slave_active_size, false);       /// The active slave-slave active block
    mPOperator.resize(master_size, slave_active_size, false);         /// The operator used for the master blocks
    mCOperator.resize(lm_active_size, slave_active_size, false);      /// The operator used for the active slave block
    mResidualLMActive.resize(lm_active_size, false );                 /// The residual corresponding to the active LM
    mResidualLMInactive.resize(lm_inactive_size, false );             /// The residual corresponding to the inactive LM
    // Pass false here too, for consistency with the sibling resizes: the
    // original omitted it, needlessly preserving the previous residual values
    mResidualDisp.resize(total_size, false );                         /// The residual of the displacements
    mLMActive.resize(lm_active_size, false);                          /// The solution of the active LM
    mLMInactive.resize(lm_inactive_size, false);                      /// The solution of the inactive LM
    mDisp.resize(total_size, false);                                  /// The solution of the displacement
}
/**
* @brief This function extracts from a vector which has the size of the overall r, the part that corresponds to u-dofs
* @param rTotalResidual The total residual of the problem
* @param ResidualU The vector containing the residual relative to the displacements
*/
inline void GetUPart (
const VectorType& rTotalResidual,
VectorType& ResidualU
)
{
// Auxiliar sizes
const SizeType other_dof_size = mOtherIndices.size();
const SizeType master_size = mMasterIndices.size();
const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
const SizeType slave_active_size = mSlaveActiveIndices.size();
const SizeType lm_active_size = mLMActiveIndices.size();
const SizeType total_size = other_dof_size + master_size + slave_inactive_size + slave_active_size;
// Resize in case the size is not correct
if (ResidualU.size() != total_size )
ResidualU.resize (total_size, false);
// The "other" (non-contact) residual entries are copied straight through
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(other_dof_size); i++)
ResidualU[i] = rTotalResidual[mOtherIndices[i]];
// The corresponding residual for the active slave DoF's
VectorType aux_res_active_slave(slave_active_size);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(slave_active_size); i++)
aux_res_active_slave[i] = rTotalResidual[mSlaveActiveIndices[i]];
if (slave_active_size > 0) {
// We compute the complementary residual for the master dofs
// (master residual minus mPOperator * r_slave_active)
VectorType aux_complement_master_residual(master_size);
TSparseSpaceType::Mult(mPOperator, aux_res_active_slave, aux_complement_master_residual);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(master_size); i++)
ResidualU[other_dof_size + i] = rTotalResidual[mMasterIndices[i]] - aux_complement_master_residual[i];
} else {
// No active slaves: the master residual is copied unchanged
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(master_size); i++)
ResidualU[other_dof_size + i] = rTotalResidual[mMasterIndices[i]];
}
// The inactive slave residual entries are copied straight through
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(slave_inactive_size); i++)
ResidualU[other_dof_size + master_size + i] = rTotalResidual[mSlaveInactiveIndices[i]];
// NOTE(review): these loops fill the slave-active segment of ResidualU from
// the active-LM residual entries; this assumes lm_active_size ==
// slave_active_size -- confirm against the index-building code
if (slave_active_size > 0) {
// We compute the complementary residual for the master dofs
// (active-LM residual minus mCOperator * r_slave_active)
VectorType aux_complement_active_lm_residual(lm_active_size);
TSparseSpaceType::Mult(mCOperator, aux_res_active_slave, aux_complement_active_lm_residual);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(lm_active_size); i++)
ResidualU[other_dof_size + master_size + slave_inactive_size + i] = rTotalResidual[mLMActiveIndices[i]] - aux_complement_active_lm_residual[i];
} else {
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(lm_active_size); i++)
ResidualU[other_dof_size + master_size + slave_inactive_size + i] = rTotalResidual[mLMActiveIndices[i]];
}
}
/**
* @brief This function extracts from a vector which has the size of the overall r, the part that corresponds to active lm-dofs
* @param rTotalResidual The total residual of the problem
* @param rResidualLMA The vector containing the residual relative to the active LM
*/
inline void GetLMAPart(
const VectorType& rTotalResidual,
VectorType& rResidualLMA
)
{
// Auxiliar sizes
const SizeType other_dof_size = mOtherIndices.size();
const SizeType master_size = mMasterIndices.size();
const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
const SizeType slave_active_size = mSlaveActiveIndices.size();
// If there are no active slaves rResidualLMA is left untouched
if (slave_active_size > 0) {
// We get the displacement residual of the active slave nodes
if (rResidualLMA.size() != slave_active_size )
rResidualLMA.resize (slave_active_size, false);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(rResidualLMA.size()); i++)
rResidualLMA[i] = rTotalResidual[mSlaveActiveIndices[i]];
// From the computed displacements we get the components of the displacements for each block
// (mDisp is laid out as [other | master | slave inactive | slave active])
VectorType disp_N(other_dof_size);
VectorType disp_M(master_size);
VectorType disp_SI(slave_inactive_size);
VectorType disp_SA(slave_active_size);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(other_dof_size); i++)
disp_N[i] = mDisp[i];
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(master_size); i++)
disp_M[i] = mDisp[other_dof_size + i];
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(slave_inactive_size); i++)
disp_SI[i] = mDisp[other_dof_size + master_size + i];
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(slave_active_size); i++)
disp_SA[i] = mDisp[other_dof_size + master_size + slave_inactive_size + i];
// Subtract each block's contribution:
// rResidualLMA -= KSAN*u_N + KSAM*u_M + KSASI*u_SI + KSASA*u_SA
// (aux_mult is reused as scratch for every product)
VectorType aux_mult(slave_active_size);
TSparseSpaceType::Mult(mKSAN, disp_N, aux_mult);
TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult);
TSparseSpaceType::Mult(mKSAM, disp_M, aux_mult);
TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult);
if (slave_inactive_size > 0) {
TSparseSpaceType::Mult(mKSASI, disp_SI, aux_mult);
TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult);
}
TSparseSpaceType::Mult(mKSASA, disp_SA, aux_mult);
TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult);
}
}
/**
* @brief This function extracts from a vector which has the size of the overall r, the part that corresponds to inactive lm-dofs
* @param rTotalResidual The total residual of the problem
* @param rResidualLMI The vector containing the residual relative to the inactive LM
*/
inline void GetLMIPart (
    const VectorType& rTotalResidual,
    VectorType& rResidualLMI
    )
{
    // Number of inactive Lagrange-multiplier DoFs
    const SizeType lm_inactive_size = mLMInactiveIndices.size();

    // Make sure the destination vector has the proper size (no preservation needed)
    if (rResidualLMI.size() != lm_inactive_size)
        rResidualLMI.resize (lm_inactive_size, false);

    // Gather the inactive-LM entries of the global residual
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(lm_inactive_size); ++i)
        rResidualLMI[i] = rTotalResidual[mLMInactiveIndices[i]];
}
/**
* @brief This method writes the displacement part
* @param rTotalResidual The total residual of the problem
* @param ResidualU The vector containing the residual relative to the displacements
*/
inline void SetUPart (
    VectorType& rTotalResidual,
    const VectorType& ResidualU
    )
{
    // Offsets of each block inside ResidualU ([other | master | slave inactive | slave active])
    const SizeType master_offset = mOtherIndices.size();
    const SizeType slave_inactive_offset = master_offset + mMasterIndices.size();
    const SizeType slave_active_offset = slave_inactive_offset + mSlaveInactiveIndices.size();

    // Scatter every block of ResidualU back to its global positions
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(mOtherIndices.size()); ++i)
        rTotalResidual[mOtherIndices[i]] = ResidualU[i];
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(mMasterIndices.size()); ++i)
        rTotalResidual[mMasterIndices[i]] = ResidualU[master_offset + i];
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(mSlaveInactiveIndices.size()); ++i)
        rTotalResidual[mSlaveInactiveIndices[i]] = ResidualU[slave_inactive_offset + i];
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(mSlaveActiveIndices.size()); ++i)
        rTotalResidual[mSlaveActiveIndices[i]] = ResidualU[slave_active_offset + i];
}
/**
* @brief This method writes the active Lagrange Multiplier part
* @param rTotalResidual The total residual of the problem
* @param ResidualLMA The vector containing the residual relative to the active LM
*/
inline void SetLMAPart (
    VectorType& rTotalResidual,
    const VectorType& ResidualLMA
    )
{
    // Scatter the active-LM residual back into its global positions
    const int active_lm_count = static_cast<int>(ResidualLMA.size());
    #pragma omp parallel for
    for (int i = 0; i < active_lm_count; ++i)
        rTotalResidual[mLMActiveIndices[i]] = ResidualLMA[i];
}
/**
* @brief This method writes the inaactive Lagrange Multiplier part
* @param rTotalResidual The total residual of the problem
* @param ResidualLMI The vector containing the residual relative to the inactive LM
*/
inline void SetLMIPart (
    VectorType& rTotalResidual,
    const VectorType& ResidualLMI
    )
{
    // Scatter the inactive-LM residual back into its global positions
    const int inactive_lm_count = static_cast<int>(ResidualLMI.size());
    #pragma omp parallel for
    for (int i = 0; i < inactive_lm_count; ++i)
        rTotalResidual[mLMInactiveIndices[i]] = ResidualLMI[i];
}
/**
* @brief This method is intended to use to ensure the matrix is structurally symmetric
* @param rA The matrix to be checked
*/
void EnsureStructuralSymmetryMatrix (SparseMatrixType& rA)
{
    // Build the transpose scaled by 0.0, so only the sparsity pattern of A^T
    // is produced (all transposed values are zero)
    SparseMatrixType transposed(rA.size2(), rA.size1());
    SparseMatrixMultiplicationUtility::TransposeMatrix<SparseMatrixType, SparseMatrixType>(transposed, rA, 0.0);

    // Summing the zero-valued transposed pattern into A symmetrizes its
    // structure without changing the stored values
    SparseMatrixMultiplicationUtility::MatrixAdd<SparseMatrixType, SparseMatrixType>(rA, transposed, 1.0);
}
/**
* @brief This method is intended to use to check the matrix
* @param rA The matrix to be checked
*/
double CheckMatrix (const SparseMatrixType& rA)
{
    // Get access to the CSR arrays of A
    const std::size_t* index1 = rA.index1_data().begin();
    const std::size_t* index2 = rA.index2_data().begin();
    const double* values = rA.value_data().begin();
    double norm = 0.0;
    for (std::size_t i=0; i<rA.size1(); ++i) {
        std::size_t row_begin = index1[i];
        std::size_t row_end = index1[i+1];
        // A structurally empty row usually indicates an assembly problem
        if (row_end - row_begin == 0)
            KRATOS_WARNING("Checking sparse matrix") << "Line " << i << " has no elements" << std::endl;
        for (std::size_t j=row_begin; j<row_end; j++) {
            // Valid column indices are [0, size2): the original check used '>'
            // and therefore accepted the out-of-bounds index equal to size2
            KRATOS_ERROR_IF( index2[j] >= rA.size2() ) << "Array above size of A" << std::endl;
            norm += values[j]*values[j];
        }
    }
    // Return the Frobenius norm of the matrix
    return std::sqrt (norm);
}
/**
* @brief This method is designed to create the final solution sparse matrix from the auxiliar values
* @detail Before create it reorder the columns. It deletes the auxiliar values after compute the matrix
* @param AuxK The matrix solution
* @param NRows The number of rows of the matrix
* @param NCols The number of columns of the matrix
* @param Ptr The indexes taht indicate the number of nonzero values in each column
* @param AuxIndex2 The indexes of the nonzero columns
* @param AuxVal The array containing the values of the sparse matrix
*/
void CreateMatrix(
SparseMatrixType& AuxK,
const SizeType NRows,
const SizeType NCols,
IndexType* Ptr,
IndexType* AuxIndex2,
double* AuxVal
)
{
// We reorder the rows (column indices inside each row must be sorted
// before building the CSR matrix)
SparseMatrixMultiplicationUtility::SortRows(Ptr, NRows, NCols, AuxIndex2, AuxVal);
// Finally we build the final matrix
SparseMatrixMultiplicationUtility::CreateSolutionMatrix(AuxK, NRows, NCols, Ptr, AuxIndex2, AuxVal);
// Release memory: this method takes ownership of the three raw arrays,
// so callers must not use or delete them after this call
delete[] Ptr;
delete[] AuxIndex2;
delete[] AuxVal;
}
/**
* @brief This method is intended to lump an existing matrix
* @param rA The matrix to be lumped
* @param rdiagA The resulting matrix
* @param Tolerance The tolerance considered to check if the values are almost 0
* @todo Improve the lumping in case of not pure diagonal matrix
*/
void ComputeDiagonalByLumping (
const SparseMatrixType& rA,
SparseMatrixType& rdiagA,
const double Tolerance = ZeroTolerance
)
{
    // System size (large block of commented-out alternative lumping code removed;
    // see the @todo above for the pending improvement for non-diagonal matrices)
    const std::size_t size_A = rA.size1();

    // Build the CSR arrays of a pure diagonal matrix whose entries are the
    // reciprocals of the diagonal of A (one entry per row)
    IndexType* ptr = new IndexType[size_A + 1];
    ptr[0] = 0;
    IndexType* aux_index2 = new IndexType[size_A];
    double* aux_val = new double[size_A];

    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(size_A); i++) {
        ptr[i+1] = i+1;    // exactly one non-zero per row
        aux_index2[i] = i; // located on the diagonal
        const double value = rA(i, i);
        // Invert the diagonal entry; if it is (almost) zero use 1.0 as a
        // safe placeholder to avoid division by zero
        if (std::abs(value) > Tolerance)
            aux_val[i] = 1.0/value;
        else
            aux_val[i] = 1.0;
    }
    SparseMatrixMultiplicationUtility::CreateSolutionMatrix(rdiagA, size_A, size_A, ptr, aux_index2, aux_val);

    // Release the auxiliar arrays (CreateSolutionMatrix copies the data)
    delete[] ptr;
    delete[] aux_index2;
    delete[] aux_val;
}
/**
* @brief Checks if the degree of freedom belongs to a displacement DoF
* @param rDoF The degree of freedom
* @return True if the DoF corresponds with a displacement dof
*/
static inline bool IsDisplacementDof(const DofType& rDoF)
{
// A DoF is a displacement DoF when its variable is any displacement component
const auto& r_variable = rDoF.GetVariable();
return r_variable == DISPLACEMENT_X
|| r_variable == DISPLACEMENT_Y
|| r_variable == DISPLACEMENT_Z;
}
/**
* @brief Checks if the degree of freedom belongs to a LM DoF
* @param rDoF The degree of freedom
* @return True if the DoF corresponds with a LM dof
*/
static inline bool IsLMDof(const DofType& rDoF)
{
// A DoF is a Lagrange-multiplier DoF when its variable is any LM component
const auto& r_variable = rDoF.GetVariable();
return r_variable == VECTOR_LAGRANGE_MULTIPLIER_X
|| r_variable == VECTOR_LAGRANGE_MULTIPLIER_Y
|| r_variable == VECTOR_LAGRANGE_MULTIPLIER_Z;
}
/**
* @brief This method returns the defaulr parameters in order to avoid code duplication
* @return Returns the default parameters
*/
Parameters GetDefaultParameters()
{
// Single source of truth for the solver defaults (avoids duplication
// across constructors); the JSON literal is returned directly.
return Parameters( R"(
{
"solver_type" : "mixed_ulm_linear_solver",
"tolerance" : 1.0e-6,
"max_iteration_number" : 200,
"echo_level" : 0
} )" );
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; // Class MixedULMLinearSolver
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::istream& operator >> (std::istream& IStream,
MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis)
{
// Nothing is read: the solver is configured through Parameters, not streams.
return IStream;
}
/// output stream function
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::ostream& operator << (std::ostream& rOStream,
const MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis)
{
// Print the solver header, a separator line, then its data.
rThis.PrintInfo (rOStream);
rOStream << std::endl;
rThis.PrintData (rOStream);
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_MIXEDULM_SOLVER_H_INCLUDED defined
|
prod-cons.c | #include <omp.h>
#include <stdio.h>
#define SIZE 100000
int flag = 0;
/* Producer: fill A with ones, then publish completion through the shared
 * global `flag`. The flush before the store makes the array contents
 * visible to the consumer before the flag is raised. */
void fill_rand(int N,double A[])
{
    for(int i=0;i<N;++i)
        A[i] = 1;
    printf("Producer populated data\n");
    #pragma omp flush
    /* FIX: store the flag atomically; a plain store racing with the
     * consumer's concurrent read is a data race (undefined behavior). */
    #pragma omp atomic write
    flag = 1;
    #pragma omp flush(flag)
}
/* Consumer: spin until the producer raises `flag`, then sum A.
 * Returns the sum of the first N elements. */
double Sum_array(int N,double A[])
{
    double sum = 0.0;
    int p_flag = 0;
    while(1)
    {
        #pragma omp flush(flag)
        /* FIX: read the flag atomically; the original plain read of a
         * variable concurrently written by the producer is a data race. */
        #pragma omp atomic read
        p_flag = flag;
        if(p_flag)
            break;
    }
    /* Flush again so the array writes published before the flag are seen. */
    #pragma omp flush
    for(int i=0;i<N;++i)
        sum = sum + A[i];
    printf("Consumer calculated Array sum\n" );
    return sum;
}
/* Runs producer then consumer back-to-back on a single thread. */
double seq_prod_cons()
{
    double data[SIZE];
    fill_rand(SIZE, data);
    return Sum_array(SIZE, data);
}
double parallel_prod_cons()
{
double A[SIZE];
double sum = 0.0;
omp_set_num_threads(2);
#pragma omp parallel sections
{
#pragma omp section
fill_rand(SIZE,A);
#pragma omp section
sum = Sum_array(SIZE,A);
}
return sum;
}
/* Times the sequential and parallel producer/consumer and reports speedup. */
int main()
{
    double time_taken_seq,time_taken_parallel,sum=0.0;
    //Sequential Producer-Consumer
    time_taken_seq = omp_get_wtime();
    sum = seq_prod_cons();
    time_taken_seq = omp_get_wtime() - time_taken_seq;
    printf("In %lf seconds, Sequential code gives sum : %lf \n",time_taken_seq,sum);
    //Parallel Producer-Consumer
    time_taken_parallel = omp_get_wtime();
    sum = parallel_prod_cons();
    time_taken_parallel = omp_get_wtime() - time_taken_parallel;
    printf("In %lf seconds, Parallel code gives sum : %lf \n",time_taken_parallel,sum);
    /* BUG FIX: speedup is sequential time divided by parallel time;
     * the original printed the inverse (parallel/sequential). */
    printf("Speed up : %lf\n", time_taken_seq/time_taken_parallel);
    return 0;
}
856.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose
// Order-1 3D 7-point heat stencil (Jacobi-style double buffering A->B then
// B->A), with the first two spatial dimensions tiled by 8.
// NOTE(review): the time loop is hard-coded to 500 iterations and the
// `tsteps` parameter is ignored -- confirm against the tuning harness.
void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0], double B[120 + 0][120 + 0][120 + 0]) {
  int t12;
  int t10;
  int t8;
  int t6;
  int t4;
  int t2;
  for (t2 = 1; t2 <= 500; t2 += 1) {
    // BUG FIX: the original private clause listed a nonexistent variable
    // `t14` (a compile error under OpenMP) and omitted `t6`, which every
    // thread writes -- a data race. `t6` must be private.
#pragma omp parallel for private(t4,t6,t8,t10,t12)
    for (t4 = 1; t4 <= n - 2; t4 += 8)
      for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 8)
          for (t10 = t8; t10 <= (t8 + 7 < n - 2 ? t8 + 7 : n - 2); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 1)
              B[t6][t10][t12] = 0.125 * (A[t6 + 1][t10][t12] - 2 * A[t6][t10][t12] + A[t6 - 1][t10][t12]) + 0.125 * (A[t6][t10 + 1][t12] - 2 * A[t6][t10][t12] + A[t6][t10 - 1][t12]) + 0.125 * (A[t6][t10][t12 + 1] - 2 * A[t6][t10][t12] + A[t6][t10][t12 - 1]) + A[t6][t10][t12];
    // Same fix applied to the second sweep's private clause.
#pragma omp parallel for private(t4,t6,t8,t10,t12)
    for (t4 = 1; t4 <= n - 2; t4 += 8)
      for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 8)
          for (t10 = t8; t10 <= (t8 + 7 < n - 2 ? t8 + 7 : n - 2); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 1)
              A[t6][t10][t12] = 0.125 * (B[t6 + 1][t10][t12] - 2 * B[t6][t10][t12] + B[t6 - 1][t10][t12]) + 0.125 * (B[t6][t10 + 1][t12] - 2 * B[t6][t10][t12] + B[t6][t10 - 1][t12]) + 0.125 * (B[t6][t10][t12 + 1] - 2 * B[t6][t10][t12] + B[t6][t10][t12 - 1]) + B[t6][t10][t12];
  }
}
|
stencil.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#define DIM 10
/* 5-point stencil demo: computes the squared Laplacian residual of Matrix
 * into Matrix2 and prints the result. */
int main()
{
    float** Matrix = (float**)malloc(sizeof(float*) * DIM);
    float** Matrix2 = (float**)malloc(sizeof(float*) * DIM);
    for(int i = 0; i < DIM; i++)
    {
        /* BUG FIX: use calloc so the interior cells start at zero; the
         * original malloc'd rows and the stencil then read uninitialized
         * interior values -- undefined behavior / garbage output. */
        Matrix[i] = (float*)calloc(DIM, sizeof(float));
        Matrix2[i] = (float*)calloc(DIM, sizeof(float));
    }
    /* Boundary conditions: ones on all four edges of both matrices. */
    for(int i = 0; i < DIM; i++)
    {
        Matrix[0][i] = 1.0;
        Matrix[DIM-1][i] = 1.0;
        Matrix[i][0] = 1.0;
        Matrix[i][DIM-1] = 1.0;
        Matrix2[0][i] = 1.0;
        Matrix2[DIM-1][i] = 1.0;
        Matrix2[i][0] = 1.0;
        Matrix2[i][DIM-1] = 1.0;
    }
    /* BUG FIX: "#pragma omp parallel for" must be followed directly by a
     * for statement; the original placed a compound block after it, which
     * does not compile when OpenMP is enabled. */
    #pragma omp parallel for
    for(int i = 1; i < DIM-1; i++)
    {
        for(int j = 1; j < DIM-1; j++)
        {
            float star = 4 * Matrix[i][j] - Matrix[i-1][j] - Matrix[i+1][j] - Matrix[i][j-1] - Matrix[i][j+1];
            Matrix2[i][j] = star * star;
        }
    }
    /* Printing is serial; the original spawned a parallel region only to
     * have thread 0 do all the printing, which is equivalent to this. */
    for(int i = 0; i < DIM; i++)
    {
        for(int j = 0; j < DIM; j++)
        {
            printf("%.4f ", Matrix2[i][j]);
        }
        printf("\n");
    }
    for(int i = 0; i < DIM; i++)
    {
        free(Matrix[i]);
        free(Matrix2[i]);
    }
    free(Matrix);
    free(Matrix2);
    return 0;
}
|
threadpool.h | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/* Modifications Copyright (c) Microsoft. */
#pragma once
#include <string>
#include <vector>
#include <functional>
#include <memory>
#include "core/common/common.h"
#include "core/platform/env.h"
#include <functional>
#include <memory>
// This file use PIMPL to avoid having eigen headers here
namespace Eigen {
class Allocator;
class ThreadPoolInterface;
} // namespace Eigen
namespace onnxruntime {
// Per-unit-of-work cost estimate used by TryParallelFor to decide how
// finely to shard a loop (mirrors the Eigen TensorOpCost fields).
struct TensorOpCost {
double bytes_loaded;    // bytes read per unit of work
double bytes_stored;    // bytes written per unit of work
double compute_cycles;  // CPU cycles (or ns if not CPU-bound) per unit of work
};
template <typename Environment>
class ThreadPoolTempl;
namespace concurrency {
class ExtendedThreadPoolInterface;
class LoopCounter;
class ThreadPool {
public:
#ifdef _WIN32
using NAME_CHAR_TYPE = wchar_t;
#else
using NAME_CHAR_TYPE = char;
#endif
// Constructs a pool for running with "degree_of_parallelism" threads with
// specified "name". env->StartThread() is used to create individual threads
// with the given ThreadOptions. If "low_latency_hint" is true the thread pool
// implementation may use it as a hint that lower latency is preferred at the
// cost of higher CPU usage, e.g. by letting one or more idle threads spin
// wait. Conversely, if the threadpool is used to schedule high-latency
// operations like I/O the hint should be set to false.
//
// REQUIRES: degree_of_parallelism > 0
// The allocator parameter is only used for creating a Eigen::ThreadPoolDevice to be used with Eigen Tensor classes.
ThreadPool(Env* env,
const ThreadOptions& thread_options,
const NAME_CHAR_TYPE* name,
int degree_of_parallelism,
bool low_latency_hint);
// Waits until all scheduled work has finished and then destroy the
// set of threads.
~ThreadPool();
// Schedules fn() for execution in the pool of threads. The function may run
// synchronously if it cannot be enqueued. This will occur if the thread pool's
// degree-of-parallelism is 1, but it may also occur for implementation-dependent
// reasons such as if queues used for buffering work are full.
static void Schedule(ThreadPool* tp,
std::function<void()> fn) {
if (tp) {
tp->Schedule(fn);
} else {
fn();
}
}
// ParallelFor shards the "total" units of work assuming each unit of work
// having roughly "cost_per_unit" cost, in cycles. Each unit of work is
// indexed 0, 1, ..., total - 1. Each shard contains 1 or more units of work
// and the total cost of each shard is roughly the same.
//
// "cost_per_unit" is an estimate of the number of CPU cycles (or nanoseconds
// if not CPU-bound) to complete a unit of work. Overestimating creates too
// many shards and CPU time will be dominated by per-shard overhead, such as
// Context creation. Underestimating may not fully make use of the specified
// parallelism, and may also cause inefficiencies due to load balancing
// issues and stragglers.
static void TryParallelFor(ThreadPool* tp, std::ptrdiff_t total, double cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn) {
TryParallelFor(tp, total, TensorOpCost{0, 0, static_cast<double>(cost_per_unit)}, fn);
}
static void TryParallelFor(ThreadPool* tp, std::ptrdiff_t total, const TensorOpCost& cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn);
// Directly schedule the 'total' tasks to the underlying threadpool, without
// cutting them by halves
inline static void TrySimpleParallelFor(ThreadPool* tp, std::ptrdiff_t total,
const std::function<void(std::ptrdiff_t)>& fn) {
#ifdef _OPENMP
ORT_UNUSED_PARAMETER(tp);
#pragma omp parallel for
for (std::ptrdiff_t i = 0; i < total; ++i) {
fn(i);
}
#else
if (tp != nullptr) {
tp->SimpleParallelFor(total, fn);
} else {
for (std::ptrdiff_t i = 0; i < total; ++i) {
// In many cases, fn can be inlined here.
fn(i);
}
}
#endif
}
/**
* Tries to call the given function in parallel, with calls split into (num_batches) batches.
*\param num_batches If it is zero, it will be replaced to the value of DegreeOfParallelism().
*\param fn A std::function or STL style functor with signature of "void f(int32_t);"
* Pitfall: Caller should cap `num_batches` to a reasonable value based on the cost of `fn` and the value of `total`.
*For example, if fn is as simple as: int sum=0; fn = [&](int i){sum +=i;} and `total` is 100, then num_batches should
*be just 1.
*
* ```
**/
template <typename F>
inline static void TryBatchParallelFor(ThreadPool* tp, std::ptrdiff_t total, F&& fn, std::ptrdiff_t num_batches) {
#ifdef _OPENMP
ORT_UNUSED_PARAMETER(tp);
ORT_UNUSED_PARAMETER(num_batches);
#pragma omp parallel for
for (std::ptrdiff_t i = 0; i < total; ++i) {
fn(i);
}
#else
if (tp == nullptr) {
for (std::ptrdiff_t i = 0; i < total; ++i) {
// In many cases, fn can be inlined here.
fn(i);
}
return;
}
if (total <= 0)
return;
if (total == 1) {
fn(0);
return;
}
if (num_batches <= 0) {
num_batches = std::min<ptrdiff_t>(total, DegreeOfParallelism(tp));
}
if (num_batches <= 1) {
// FIX: use std::ptrdiff_t for the counter; `total` is std::ptrdiff_t and
// the original `int` counter could overflow for very large totals.
for (std::ptrdiff_t i = 0; i < total; i++) {
fn(i);
}
return;
}
tp->SimpleParallelFor(num_batches, [&](std::ptrdiff_t batch_index) {
auto work = PartitionWork(batch_index, num_batches, total);
for (std::ptrdiff_t i = work.start; i < work.end; i++) {
fn(i);
}
});
#endif
}
struct WorkInfo {
std::ptrdiff_t start;
std::ptrdiff_t end;
};
/** Calculate the start and end offsets for a batch.
@remarks Based on MlasPartitionWork
*/
static WorkInfo PartitionWork(std::ptrdiff_t batch_idx, std::ptrdiff_t num_batches, std::ptrdiff_t total_work) {
const std::ptrdiff_t work_per_batch = total_work / num_batches;
const std::ptrdiff_t work_per_batch_extra = total_work % num_batches;
WorkInfo info;
if (batch_idx < work_per_batch_extra) {
// The first (total_work % num_batches) batches get one extra unit each.
info.start = (work_per_batch + 1) * batch_idx;
info.end = info.start + work_per_batch + 1;
} else {
info.start = work_per_batch * batch_idx + work_per_batch_extra;
info.end = info.start + work_per_batch;
}
return info;
}
//......................................................................
//
// The following static methods take into account whether OpenMP is
// enabled/disabled, and if the thread pool pointer is nullptr
// during sequential execution.
// Provide a hint to the caller for whether or not to parallelize
// work. This lets a caller switch to a sequential version of an
// algorithm rather than using calls via the ParallelFor functions.
static bool ShouldParallelize(const ThreadPool* tp);
// Return the degree of parallelism that code should assume when using the thread pool.
// It decouples the degree of parallelism for use with the thread pool from
// the implementation choice of whether this matches the number of threads created in
// the pool.
//
// Currently, a loop with degree-of-parallelism N is supported by a pool of N-1 threads
// working in combination with the thread initiating the loop.
static int DegreeOfParallelism(const ThreadPool* tp);
ORT_DISALLOW_COPY_AND_ASSIGNMENT(ThreadPool);
private:
friend class LoopCounter;
// Returns the number of threads created in the pool. This may be different from the
// value returned by DegreeOfParallelism to code using the pool.
int NumThreads() const;
// Returns current thread id between 0 and NumThreads() - 1, if called from a
// thread in the pool. Returns -1 otherwise.
int CurrentThreadId() const;
// Run fn with up to n degree-of-parallelism enlisting the thread pool for
// help. The degree-of-parallelism includes the caller, and so if n==1
// then the function will run directly in the caller. The fork-join
// synchronization is handled in the thread pool, and so any state captured
// by fn() is safe from concurrent access once RunWithHelp returns.
void RunInParallel(std::function<void()> fn, int n);
// Divides the work represented by the range [0, total) into k shards.
// Calls fn(i*block_size, (i+1)*block_size) from the ith shard (0 <= i < k).
// Each shard may be executed on a different thread in parallel, depending on
// the number of threads available in the pool.
// When (i+1)*block_size > total, fn(i*block_size, total) is called instead.
// Requires 0 < block_size <= total.
void ParallelForFixedBlockSizeScheduling(std::ptrdiff_t total, std::ptrdiff_t block_size,
const std::function<void(std::ptrdiff_t, std::ptrdiff_t)>& fn);
// Return whether or not the calling thread should run a loop of
// num_iterations divided in chunks of block_size in parallel. If not,
// the caller should run the loop sequentially.
bool ShouldParallelizeLoop(const std::ptrdiff_t num_iterations,
const std::ptrdiff_t block_size = 1) const;
// Internal (non-static) parallel loop methods. Unlike the public static methods,
// these will not handle the cases of OpenMP builds. or builds without a threadpool.
void ParallelFor(std::ptrdiff_t total, double cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn);
void ParallelFor(std::ptrdiff_t total, const TensorOpCost& cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn);
void SimpleParallelFor(std::ptrdiff_t total, const std::function<void(std::ptrdiff_t)>& fn);
void Schedule(std::function<void()> fn);
ThreadOptions thread_options_;
// If a thread pool is created with degree_of_parallelism != 1 then an underlying
// EigenThreadPool is used to create OS threads and handle work distribution to them.
// If degree_of_parallelism == 1 then underlying_threadpool_ is left as nullptr
// and parallel work is run directly by the caller.
ExtendedThreadPoolInterface* underlying_threadpool_ = nullptr;
// If used, underlying_threadpool_ is instantiated and owned by the ThreadPool.
std::unique_ptr<ThreadPoolTempl<Env> > extended_eigen_threadpool_;
};
} // namespace concurrency
} // namespace onnxruntime
|
volume.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
#include <math.h>
# define iterations 25000000
/* Monte-Carlo estimate of the volume of the n-dimensional unit sphere:
 * samples points in [0,1)^n, counts those inside the unit ball, and scales
 * the orthant fraction by 2^n. Prints the result. */
void getVolumes(int n) {
    double total1 = 0;
    #pragma omp parallel num_threads(4)
    {
        /* per-thread seed so rand_r streams do not collide */
        unsigned seed = 25234 + 17*omp_get_thread_num();
        #pragma omp for reduction(+:total1)
        for (long i = 0; i < iterations; i++)
        {
            /* BUG FIX: `r` and `total0` are now loop-local. In the original,
             * `r` was a shared function-scope variable written by every
             * thread -- a data race that corrupted the samples. */
            double total0 = 0.0;
            for (int j = 0; j < n; ++j)
            {
                const double r = (long double)rand_r(&seed) / RAND_MAX;
                total0 += r * r;
            }
            if (total0 < 1)
                ++total1;
        }
    }
    printf("Объем %d-мерной сферы равен %f\n", n, pow(2.0, n) * (total1 / iterations));
}
/* Sweep dimensions 2..19 and report the estimated sphere volume for each. */
int main(int argc, char *argv[])
{
    for (int dim = 2; dim < 20; ++dim)
        getVolumes(dim);
    return 0;
}
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Computes *result = *x - *y as a struct timeval.
 * NOTE: *y is normalized in place (callers see it modified).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow: if x has fewer microseconds than y, move whole seconds from
     * y's seconds field into its microseconds field. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Carry the other way when the microsecond gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: allocates two 3D buffers plus 7 coefficient arrays,
 * runs the tiled 7-point variable-coefficient stencil TESTS times, and
 * reports the best wall-clock time. Usage: prog Nx Ny Nz [Nt]. */
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
/* BUG FIX: initialize the sizes. With fewer than 4 (resp. 5) arguments the
 * originals were read uninitialized -- undefined behavior. Zero sizes turn
 * every loop below into a no-op instead. */
int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays (A is double-buffered: A[0] and A[1])
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 4;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
srand(42);
/* BUG FIX: start the initialization at index 0; the stencil reads the
 * index-0 boundary planes/rows/columns, which the original loops (starting
 * at 1) left uninitialized -- undefined heap reads. The second buffer's
 * boundary is copied from the first for the same reason. */
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = A[0][i][j][k];
}
}
}
for (m=0; m<7; m++) {
for (i=0; i<Nz; i++) {
for (j=0; j<Ny; j++) {
for (k=0; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code (auto-generated time-tiled loop nest; do not
 * hand-tune the bounds -- they come from the polyhedral scheduler) */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(4*t2-Nz,4));t3<=min(min(min(floord(4*t2+Ny,4),floord(Nt+Ny-4,4)),floord(2*t1+Ny+1,4)),floord(4*t1-4*t2+Nz+Ny-1,4));t3++) {
for (t4=max(max(max(0,ceild(t1-31,32)),ceild(4*t2-Nz-60,64)),ceild(4*t3-Ny-60,64));t4<=min(min(min(min(floord(4*t2+Nx,64),floord(4*t3+Nx,64)),floord(Nt+Nx-4,64)),floord(2*t1+Nx+1,64)),floord(4*t1-4*t2+Nz+Nx-1,64));t4++) {
for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),4*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),4*t3+2),64*t4+62),4*t1-4*t2+Nz+1);t5++) {
for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
lbv=max(64*t4,t5+1);
ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
/* LEAK FIX: the top-level pointer arrays and the tile-size list were
 * never released in the original. */
free(A);
free(coef);
free(tile_size);
return 0;
}
|
GB_unop__identity_fc64_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_uint16)
// op(A') function: GB (_unop_tran__identity_fc64_uint16)
// C type: GxB_FC64_t
// A type: uint16_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op entrywise: Cx [p] = (GxB_FC64_t) Ax [p],
// typecasting each uint16_t entry to double complex with zero imaginary part.
// Generated code (see header) -- edits belong in the Generator/ template.
GrB_Info GB (_unop_apply__identity_fc64_uint16)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: all anz entries are present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint16_t -> GxB_FC64_t, and apply
// the identity op. The loop body lives in the shared GB_unop_transpose.c
// template, driven by the GB_CAST_OP macro defined above.
// Generated code (see header) -- edits belong in the Generator/ template.
GrB_Info GB (_unop_tran__identity_fc64_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
flux.c |
#include <string.h>
#include <stdint.h>
#include <omp.h>
#include <math.h>
#include <ktime.h>
#include <geometry.h>
#include <phy.h>
#ifdef __USE_HW_COUNTER
#include <perf.h>
#include <kperf.h>
#endif
#define MAG0 (0.5 / 3)
#define MAG1 (-MAG0)
/*
Calculates the residual
*/
void
compute_residual(struct residual *restrict res)
{
#ifdef __USE_HW_COUNTER
const struct fd fd = res->perf_counters->fd;
struct counters start;
perf_read(fd, &start);
const uint64_t icycle = __rdtsc();
#endif
struct ktime ktime;
setktime(&ktime);
const size_t bsz = res->bsz;
const size_t nfnodes = res->nfnodes;
const size_t dofs = res->dofs;
const uint32_t snfc = res->snfc;
const double pressure = res->pressure;
const double velocity_u = res->velocity_u;
const double velocity_v = res->velocity_v;
const double velocity_w = res->velocity_w;
const double *restrict f_xyz0 = res->f_xyz0;
const double *restrict f_xyz1 = res->f_xyz1;
const double *restrict f_xyz2 = res->f_xyz2;
const double *restrict xyz0 = res->xyz0;
const double *restrict xyz1 = res->xyz1;
const double *restrict xyz2 = res->xyz2;
const uint32_t *restrict ie = res->ie;
const uint32_t *restrict part = res->part;
const uint32_t *restrict snfic = res->snfic;
const uint32_t *restrict n0 = res->n0;
const uint32_t *restrict n1 = res->n1;
const uint32_t *restrict nfptr = res->nfptr;
const uint32_t *restrict sn0 = res->sn0;
const uint32_t *restrict sn1 = res->sn1;
const uint32_t *restrict sn2 = res->sn2;
const double *restrict x0 = res->x0;
const double *restrict x1 = res->x1;
const double *restrict x2 = res->x2;
const double *restrict x3 = res->x3;
const double *restrict q = res->q;
const double *restrict w0termsx = res->w0termsx;
const double *restrict w0termsy = res->w0termsy;
const double *restrict w0termsz = res->w0termsz;
const double *restrict w1termsx = res->w1termsx;
const double *restrict w1termsy = res->w1termsy;
const double *restrict w1termsz = res->w1termsz;
double *restrict gradx0 = res->gradx0;
double *restrict gradx1 = res->gradx1;
double *restrict gradx2 = res->gradx2;
memset(gradx0, 0, dofs * sizeof(double));
memset(gradx1, 0, dofs * sizeof(double));
memset(gradx2, 0, dofs * sizeof(double));
double *restrict r = res->r;
memset(r, 0, dofs * sizeof(double));
__assume_aligned(r, 64);
/*
Calculates the gradients at the nodes using weighted least squares
This solves using Gram-Schmidt
*/
#pragma omp parallel
{
const uint32_t t = omp_get_thread_num();
const uint32_t ie0 = ie[t];
const uint32_t ie1 = ie[t+1];
uint32_t i;
for(i = ie0; i < ie1; i++)
{
const uint32_t node0 = n0[i];
const uint32_t node1 = n1[i];
const uint32_t idx0 = bsz * node0;
const uint32_t idx1 = bsz * node1;
double dq;
double termx;
double termy;
double termz;
if(part[node0] == t)
{
termx = w0termsx[i];
termy = w0termsy[i];
termz = w0termsz[i];
dq = q[idx1 + 0] - q[idx0 + 0];
gradx0[idx0 + 0] += termx * dq;
gradx1[idx0 + 0] += termy * dq;
gradx2[idx0 + 0] += termz * dq;
dq = q[idx1 + 1] - q[idx0 + 1];
gradx0[idx0 + 1] += termx * dq;
gradx1[idx0 + 1] += termy * dq;
gradx2[idx0 + 1] += termz * dq;
dq = q[idx1 + 2] - q[idx0 + 2];
gradx0[idx0 + 2] += termx * dq;
gradx1[idx0 + 2] += termy * dq;
gradx2[idx0 + 2] += termz * dq;
dq = q[idx1 + 3] - q[idx0 + 3];
gradx0[idx0 + 3] += termx * dq;
gradx1[idx0 + 3] += termy * dq;
gradx2[idx0 + 3] += termz * dq;
}
if(part[node1] == t)
{
termx = w1termsx[i];
termy = w1termsy[i];
termz = w1termsz[i];
dq = q[idx0 + 0] - q[idx1 + 0];
gradx0[idx1 + 0] += termx * dq;
gradx1[idx1 + 0] += termy * dq;
gradx2[idx1 + 0] += termz * dq;
dq = q[idx0 + 1] - q[idx1 + 1];
gradx0[idx1 + 1] += termx * dq;
gradx1[idx1 + 1] += termy * dq;
gradx2[idx1 + 1] += termz * dq;
dq = q[idx0 + 2] - q[idx1 + 2];
gradx0[idx1 + 2] += termx * dq;
gradx1[idx1 + 2] += termy * dq;
gradx2[idx1 + 2] += termz * dq;
dq = q[idx0 + 3] - q[idx1 + 3];
gradx0[idx1 + 3] += termx * dq;
gradx1[idx1 + 3] += termy * dq;
gradx2[idx1 + 3] += termz * dq;
}
}
}
/*
Calculates the fluxes on the face and performs the flux balance
*/
#pragma omp parallel
{
uint32_t t = omp_get_thread_num();
uint32_t ie0 = ie[t];
uint32_t ie1 = ie[t+1];
uint32_t i;
for(i = ie0; i < ie1; i++)
{
uint32_t node0 = n0[i];
uint32_t node1 = n1[i];
double xn = x0[i];
double yn = x1[i];
double zn = x2[i];
double ln = x3[i];
double xmean = 0.5f * (xyz0[node0] + xyz0[node1]);
double ymean = 0.5f * (xyz1[node0] + xyz1[node1]);
double zmean = 0.5f * (xyz2[node0] + xyz2[node1]);
/*
Now lets get our other 2 vectors
For first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} is close to unity, use {0,1,0}
*/
double X1, Y1, Z1;
double dot = xn;
if(fabs(dot) < 0.95f)
{
X1 = 1.f - dot * xn;
Y1 = -dot * yn;
Z1 = -dot * zn;
}
else
{
dot = yn;
X1 = -dot * xn;
Y1 = 1.f - dot * yn;
Z1 = -dot * zn;
}
/*
Normalize the first vector
*/
double size = X1 * X1;
size += Y1 * Y1;
size += Z1 * Z1;
size = sqrt(size);
X1 /= size;
Y1 /= size;
Z1 /= size;
/*
Take cross-product of normal and V1 to get V2
*/
double X2 = yn * Z1;
X2 -= zn * Y1;
double Y2 = zn * X1;
Y2 -= xn * Z1;
double Z2 = xn * Y1;
Z2 -= yn * X1;
/*
Get variables on "left" and "right" side of face
*/
double rx = xmean - xyz0[node0];
double ry = ymean - xyz1[node0];
double rz = zmean - xyz2[node0];
uint32_t idx0 = bsz * node0;
uint32_t idx1 = bsz * node1;
// Pressure
double pL = q[idx0 + 0] + gradx0[idx0 + 0] * rx;
pL += gradx1[idx0 + 0] * ry;
pL += gradx2[idx0 + 0] * rz;
// Velocity u
double uL = q[idx0 + 1] + gradx0[idx0 + 1] * rx;
uL += gradx1[idx0 + 1] * ry;
uL += gradx2[idx0 + 1] * rz;
// Velocity v
double vL = q[idx0 + 2] + gradx0[idx0 + 2] * rx;
vL += gradx1[idx0 + 2] * ry;
vL += gradx2[idx0 + 2] * rz;
// Velocity w
double wL = q[idx0 + 3] + gradx0[idx0 + 3] * rx;
wL += gradx1[idx0 + 3] * ry;
wL += gradx2[idx0 + 3] * rz;
double ubarL = xn * uL;
ubarL += yn * vL;
ubarL += zn * wL;
rx = xmean - xyz0[node1];
ry = ymean - xyz1[node1];
rz = zmean - xyz2[node1];
// Pressure
double pR = q[idx1 + 0] + gradx0[idx1 + 0] * rx;
pR += gradx1[idx1 + 0] * ry;
pR += gradx2[idx1 + 0] * rz;
// Velocity u
double uR = q[idx1 + 1] + gradx0[idx1 + 1] * rx;
uR += gradx1[idx1 + 1] * ry;
uR += gradx2[idx1 + 1] * rz;
// Velocity v
double vR = q[idx1 + 2] + gradx0[idx1 + 2] * rx;
vR += gradx1[idx1 + 2] * ry;
vR += gradx2[idx1 + 2] * rz;
// Velocity w
double wR = q[idx1 + 3] + gradx0[idx1 + 3] * rx;
wR += gradx1[idx1 + 3] * ry;
wR += gradx2[idx1 + 3] * rz;
double ubarR = xn * uR;
ubarR += yn * vR;
ubarR += zn * wR;
/* Compute averages */
//double p = 0.5f * (pL + pR);
double u = 0.5f * (uL + uR);
double v = 0.5f * (vL + vR);
double w = 0.5f * (wL + wR);
double ubar = xn * u;
ubar += yn * v;
ubar += zn * w;
double phi1 = xn * BETA;
phi1 += u * ubar;
double phi2 = yn * BETA;
phi2 += v * ubar;
double phi3 = zn * BETA;
phi3 += w * ubar;
double phi4 = Y2 * phi3;
phi4 -= Z2 * phi2;
double phi5 = Z2 * phi1;
phi5 -= X2 * phi3;
double phi6 = X2 * phi2;
phi6 -= Y2 * phi1;
double phi7 = Z1 * phi2;
phi7 -= Y1 * phi3;
double phi8 = X1 * phi3;
phi8 -= Z1 * phi1;
double phi9 = Y1 * phi1;
phi9 -= X1 * phi2;
double c2 = ubar * ubar + BETA;
double c = sqrt(c2);
/*
Now compute eigenvalues, eigenvectors, and strengths
*/
double eig1 = fabs(ubar);
double eig2 = fabs(ubar);
double eig3 = fabs(ubar + c);
double eig4 = fabs(ubar - c);
double dp = pR - pL;
double du = uR - uL;
double dv = vR - vL;
double dw = wR - wL;
/*
Components of T(inverse)
*/
double ti11 = u * phi4;
ti11 += v * phi5;
ti11 += w * phi6;
ti11 = -ti11 / BETA;
double ti21 = u * phi7;
ti21 += v * phi8;
ti21 += w * phi9;
ti21 = -ti21 / BETA;
double ti31 = 0.5f * (c - ubar);
ti31 /= BETA;
double ti41 = -0.5f * (c + ubar);
ti41 /= BETA;
/*
jumps (T(inverse) * dq)
*/
double dv1 = ti11 * dp;
dv1 += phi4 * du;
dv1 += phi5 * dv;
dv1 += phi6 * dw;
dv1 /= c2;
double dv2 = ti21 * dp;
dv2 += phi7 * du;
dv2 += phi8 * dv;
dv2 += phi9 * dw;
dv2 /= c2;
double dv3 = 2.f * ti31 * dp;
dv3 += xn * du;
dv3 += yn * dv;
dv3 += zn * dw;
dv3 *= 0.5f / c2;
double dv4 = 2.f * ti41 * dp;
dv4 += xn * du;
dv4 += yn * dv;
dv4 += zn * dw;
dv4 *= 0.5f / c2;
/*
Now get elements of T
*/
double r13 = c * BETA;
double r23 = u * (ubar + c);
r23 += xn * BETA;
double r33 = v * (ubar + c);
r33 += yn * BETA;
double r43 = w * (ubar + c);
r43 += zn * BETA;
double r14 = -c * BETA;
double r24 = u * (ubar - c);
r24 += xn * BETA;
double r34 = v * (ubar - c);
r34 += yn * BETA;
double r44 = w * (ubar - c);
r44 += zn * BETA;
/*
Calculate T* |lambda| * T(inverse)
*/
double t1 = eig3 * r13 * dv3 + eig4 * r14 * dv4;
double t2 = eig1 * X1 * dv1 + eig2 * X2 * dv2;
t2 += eig3 * r23 * dv3 + eig4 * r24 * dv4;
double t3 = eig1 * Y1 * dv1 + eig2 * Y2 * dv2;
t3 += eig3 * r33 * dv3 + eig4 * r34 * dv4;
double t4 = eig1 * Z1 * dv1 + eig2 * Z2 * dv2;
t4 += eig3 * r43 * dv3 + eig4 * r44 * dv4;
/*
Modify to calculate .5(fl +fr) from nodes
instead of extrapolated ones
*/
double fluxp1 = ln * BETA * ubarL;
double fluxp2 = ln * (uL * ubarL + xn * pL);
double fluxp3 = ln * (vL * ubarL + yn * pL);
double fluxp4 = ln * (wL * ubarL + zn * pL);
/*
Now the right side
*/
double fluxm1 = ln * BETA * ubarR;
double fluxm2 = ln * (uR * ubarR + xn * pR);
double fluxm3 = ln * (vR * ubarR + yn * pR);
double fluxm4 = ln * (wR * ubarR + zn * pR);
double res1 = 0.5f * (fluxp1 + fluxm1 - ln * t1);
double res2 = 0.5f * (fluxp2 + fluxm2 - ln * t2);
double res3 = 0.5f * (fluxp3 + fluxm3 - ln * t3);
double res4 = 0.5f * (fluxp4 + fluxm4 - ln * t4);
r[idx0 + 0] = (part[node0] == t) ? (r[idx0 + 0] + res1) : r[idx0 + 0];
r[idx0 + 1] = (part[node0] == t) ? (r[idx0 + 1] + res2) : r[idx0 + 1];
r[idx0 + 2] = (part[node0] == t) ? (r[idx0 + 2] + res3) : r[idx0 + 2];
r[idx0 + 3] = (part[node0] == t) ? (r[idx0 + 3] + res4) : r[idx0 + 3];
r[idx1 + 0] = (part[node1] == t) ? (r[idx1 + 0] - res1) : r[idx1 + 0];
r[idx1 + 1] = (part[node1] == t) ? (r[idx1 + 1] - res2) : r[idx1 + 1];
r[idx1 + 2] = (part[node1] == t) ? (r[idx1 + 2] - res3) : r[idx1 + 2];
r[idx1 + 3] = (part[node1] == t) ? (r[idx1 + 3] - res4) : r[idx1 + 3];
}
}
uint32_t i;
for(i = 0; i < snfc; i++)
{
uint32_t if0 = snfic[i];
uint32_t if1 = snfic[i+1];
uint32_t j;
#pragma omp parallel for
for(j = if0; j < if1; j++)
{
uint32_t node0 = sn0[j];
uint32_t node1 = sn1[j];
uint32_t node2 = sn2[j];
double p1 = q[bsz * node0];
double p2 = q[bsz * node1];
double p3 = q[bsz * node2];
double ax = xyz0[node1] - xyz0[node0];
double ay = xyz1[node1] - xyz1[node0];
double az = xyz2[node1] - xyz2[node0];
double bx = xyz0[node2] - xyz0[node0];
double by = xyz1[node2] - xyz1[node0];
double bz = xyz2[node2] - xyz2[node0];
/*
Normal points away from grid interior.
Magnitude is 1/3 area of surface triangle.
*/
double xn = ay * bz;
xn -= az * by;
xn *= MAG1;
double yn = ax * bz;
yn -= az * bx;
yn *= MAG0;
double zn = ax * by;
zn -= ay * bx;
zn *= MAG1;
double pa = 0.125f * (p2 + p3);
pa += 0.75f * p1;
double pb = 0.125f * (p3 + p1);
pb += 0.75f * p2;
double pc = 0.125f * (p1 + p2);
pc += 0.75f * p3;
uint32_t idx;
idx = bsz * node0;
r[idx + 1] += xn * pa;
r[idx + 2] += yn * pa;
r[idx + 3] += zn * pa;
idx = bsz * node1;
r[idx + 1] += xn * pb;
r[idx + 2] += yn * pb;
r[idx + 3] += zn * pb;
idx = bsz * node2;
r[idx + 1] += xn * pc;
r[idx + 2] += yn * pc;
r[idx + 3] += zn * pc;
}
}
/* Do the free boundaries */
#pragma omp parallel for
for(i = 0; i < nfnodes; i++)
{
uint32_t n = nfptr[i];
/*
Get normal and "other" 2 vectors. Remember that fxn,fyn and fzn
has the magnitude of the face contained in it.
*/
double xn = f_xyz0[i];
double yn = f_xyz1[i];
double zn = f_xyz2[i];
double area = xn * xn;
area += yn * yn;
area += zn * zn;
area = sqrt(area);
xn /= area;
yn /= area;
zn /= area;
/*
Now lets get our other 2 vectors
For first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} is close to unity, use {0,1,0}
*/
double X1, Y1, Z1;
double dot = xn;
if(fabs(dot) < 0.95f)
{
X1 = 1.f - dot * xn;
Y1 = -dot * yn;
Z1 = -dot * zn;
}
else
{
dot = yn;
X1 = -dot * xn;
Y1 = 1.f - dot * yn;
Z1 = -dot * zn;
}
/*
Normalize the first vector (V1)
*/
double size = X1 * X1;
size += Y1 * Y1;
size += Z1 * Z1;
size = sqrt(size);
X1 /= size;
Y1 /= size;
Z1 /= size;
/*
Take cross-product of normal with V1 to get V2
*/
double X2 = yn * Z1;
X2 -= zn * Y1;
double Y2 = zn * X1;
Y2 -= xn * Z1;
double Z2 = xn * Y1;
Z2 -= yn * X1;
/*
Calculate elements of T and T(inverse) evaluated at free-stream
*/
double ubar0 = xn * velocity_u;
ubar0 += yn * velocity_v;
ubar0 += zn * velocity_w;
double c20 = ubar0 * ubar0 + BETA;
double c0 = sqrt(c20);
double phi1 = xn * BETA;
phi1 += velocity_u * ubar0;
double phi2 = yn * BETA;
phi2 += velocity_v * ubar0;
double phi3 = zn * BETA;
phi3 += velocity_w * ubar0;
double phi4 = Y2 * phi3;
phi4 -= Z2 * phi2;
double phi5 = Z2 * phi1;
phi5 -= X2 * phi3;
double phi6 = X2 * phi2;
phi6 -= Y2 * phi1;
double phi7 = Z1 * phi2;
phi7 -= Y1 * phi3;
double phi8 = X1 * phi3;
phi8 -= Z1 * phi1;
double phi9 = Y1 * phi1;
phi9 -= X1 * phi2;
double t13 = c0 * BETA;
double t23 = velocity_u * (ubar0 + c0);
t23 += xn * BETA;
double t33 = velocity_v * (ubar0 + c0);
t33 += yn * BETA;
double t43 = velocity_w * (ubar0 + c0);
t43 += zn * BETA;
double t14 = -c0 * BETA;
double t24 = velocity_u * (ubar0 - c0);
t24 += xn * BETA;
double t34 = velocity_v * (ubar0 - c0);
t34 += yn * BETA;
double t44 = velocity_w * (ubar0 - c0);
t44 += zn * BETA;
double ti11 = velocity_u * phi4;
ti11 += velocity_v * phi5;
ti11 += velocity_w * phi6;
ti11 = -ti11/BETA;
double ti21 = velocity_u * phi7;
ti21 += velocity_v * phi8;
ti21 += velocity_w * phi9;
ti21 = -ti21/BETA;
double ti31 = 0.5f * (c0 - ubar0);
ti31 /= BETA;
double ti41 = -0.5f * (c0 + ubar0);
ti41 /= BETA;
/*
Now, get the variables on the "inside"
*/
double pi = q[bsz * n + 0];
double ui = q[bsz * n + 1];
double vi = q[bsz * n + 2];
double wi = q[bsz * n + 3];
double un = xn * ui;
un += yn * vi;
un += zn * wi;
/*
If ubar is negative, take the reference condition from outside
*/
double pr, ur, vr, wr;
if(un > 0.f)
{
pr = pi;
ur = ui;
vr = vi;
wr = wi;
}
else
{
pr = pressure;
ur = velocity_u;
vr = velocity_v;
wr = velocity_w;
}
/*
Set rhs
*/
double rhs1 = ti11 * pr;
rhs1 += phi4 * ur;
rhs1 += phi5 * vr;
rhs1 += phi6 * wr;
rhs1 /= c20;
double rhs2 = ti21 * pr;
rhs2 += phi7 * ur;
rhs2 += phi8 * vr;
rhs2 += phi9 * wr;
rhs2 /= c20;
double rhs3 = 2.f * ti31 * pi;
rhs3 += xn * ui;
rhs3 += yn * vi;
rhs3 += zn * wi;
rhs3 = 0.5f * rhs3 / c20;
double rhs4 = 2.f * ti41 * pressure;
rhs4 += xn * velocity_u;
rhs4 += yn * velocity_v;
rhs4 += zn * velocity_w;
rhs4 = 0.5f * rhs4 / c20;
/*
Now do matrix multiplication to get values on boundary
*/
double pb = t13 * rhs3;
pb += t14 * rhs4;
double ub = X1 * rhs1;
ub += X2 * rhs2;
ub += t23 * rhs3;
ub += t24 * rhs4;
double vb = Y1 * rhs1;
vb += Y2 * rhs2;
vb += t33 * rhs3;
vb += t34 * rhs4;
double wb = Z1 * rhs1;
wb += Z2 * rhs2;
wb += t43 * rhs3;
wb += t44 * rhs4;
double ubar = xn * ub;
ubar += yn * vb;
ubar += zn * wb;
uint32_t idx = bsz * n;
r[idx + 0] += area * BETA * ubar;
r[idx + 1] += area * (ub * ubar + xn * pb);
r[idx + 2] += area * (vb * ubar + yn * pb);
r[idx + 3] += area * (wb * ubar + zn * pb);
}
compute_time(&ktime, res->t);
#ifdef __USE_HW_COUNTER
const uint64_t cycle = __rdtsc() - icycle;
struct counters end;
perf_read(fd, &end);
struct tot tot;
perf_calc(start, end, &tot);
res->perf_counters->ctrs->flux.cycles += cycle;
res->perf_counters->ctrs->flux.tot.imcR += tot.imcR;
res->perf_counters->ctrs->flux.tot.imcW += tot.imcW;
res->perf_counters->ctrs->flux.tot.edcR += tot.edcR;
res->perf_counters->ctrs->flux.tot.edcW += tot.edcW;
#endif
} |
sparse.c | /*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*********************************************************************************
NAME: sparse
PURPOSE: This program tests the efficiency with which a sparse matrix
vector multiplication is carried out
USAGE: The program takes as input the number of threads, the 2log of the linear
size of the 2D grid (equalling the 2log of the square root of the order
of the sparse matrix), the radius of the difference stencil, and the number
of times the matrix-vector multiplication is carried out.
<progname> <# threads> <# iterations> <2log root-of-matrix-order> <radius>
The output consists of diagnostics to make sure the
algorithm worked, and of timing statistics.
FUNCTIONS CALLED:
Other than OpenMP or standard C functions, the following
functions are used in this program:
wtime()
bail_out()
reverse()
NOTES:
HISTORY: Written by Rob Van der Wijngaart, August 2006.
Updated by RvdW to parallelize matrix generation, March 2007.
Updated by RvdW to fix verification bug, February 2013
Updated by RvdW to sort matrix elements to reflect traditional CSR storage,
August 2013
***********************************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>
/* linearize the grid index */
#define LIN(i,j) (i+((j)<<lsize))
/* if the scramble flag is set, convert all (linearized) grid indices by
reversing their bits; if not, leave the grid indices alone */
#ifdef SCRAMBLE
#define REVERSE(a,b) reverse((a),(b))
#else
#define REVERSE(a,b) (a)
#endif
#define BITS_IN_BYTE 8
static u64Int reverse(register u64Int, int);
static int compare(const void *el1, const void *el2);
int main(int argc, char **argv){

  int               iter, r;       /* dummies                                    */
  int               lsize;         /* logarithmic linear size of grid            */
  int               lsize2;        /* logarithmic size of grid                   */
  int               size;          /* linear size of grid                        */
  s64Int            size2;         /* matrix order (=total # points in grid)     */
  int               radius,        /* stencil parameters                         */
                    stencil_size;
  s64Int            row, col, first, last; /* dummies                            */
  s64Int            i, j;          /* dummies                                    */
  int               iterations;    /* number of times the multiplication is done */
  s64Int            elm;           /* sequence number of matrix nonzero          */
  s64Int            nent;          /* number of nonzero entries                  */
  double            sparsity;      /* fraction of non-zeroes in matrix           */
  double            sparse_time,   /* timing parameters                          */
                    avgtime;
  double * RESTRICT matrix;        /* sparse matrix entries                      */
  double * RESTRICT vector;        /* vector multiplying the sparse matrix       */
  double * RESTRICT result;        /* computed matrix-vector product             */
  double            temp;          /* per-thread dot-product accumulator; MUST
                                      be privatized in the parallel region       */
  double            vector_sum;    /* checksum of result                         */
  double            reference_sum; /* checksum of "rhs"                          */
  double            epsilon = 1.e-8; /* error tolerance                          */
  s64Int * RESTRICT colIndex;      /* column indices of sparse matrix entries    */
  int               nthread_input, /* thread parameters                          */
                    nthread;
  int               num_error=0;   /* flag that signals that requested and
                                      obtained numbers of threads are the same   */
  size_t            vector_space,  /* variables used to hold malloc sizes        */
                    matrix_space,
                    index_space;

  if (argc != 5) {
    printf("Usage: %s <# threads> <# iterations> <2log grid size> <stencil radius>\n",*argv);
    exit(EXIT_FAILURE);
  }

  /* Take number of threads to request from command line */
  nthread_input = atoi(*++argv);
  if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
    printf("ERROR: Invalid number of threads: %d\n", nthread_input);
    exit(EXIT_FAILURE);
  }
  omp_set_num_threads(nthread_input);

  iterations = atoi(*++argv);
  if (iterations < 1){
    printf("ERROR: Iterations must be positive : %d \n", iterations);
    exit(EXIT_FAILURE);
  }

  lsize = atoi(*++argv);
  /* validate lsize BEFORE using it as a shift count: 1<<lsize is undefined
     behavior for a negative lsize */
  if (lsize <0) {
    printf("ERROR: Log of grid size must be greater than or equal to zero: %d\n",
           (int) lsize);
    exit(EXIT_FAILURE);
  }
  lsize2 = 2*lsize;
  size = 1<<lsize;
  /* compute number of points in the grid */
  size2 = size*size;

  radius = atoi(*++argv);
  if (radius <0) {
    /* report the offending radius (the original printed "size" here) */
    printf("ERROR: Stencil radius must be non-negative: %d\n", (int) radius);
    exit(EXIT_FAILURE);
  }

  /* emit error if (periodic) stencil overlaps with itself */
  if (size <2*radius+1) {
    printf("ERROR: Grid extent %d smaller than stencil diameter 2*%d+1= %d\n",
           size, radius, radius*2+1);
    exit(EXIT_FAILURE);
  }

  /* compute total size of star stencil in 2D */
  stencil_size = 4*radius+1;
  /* sparsity follows from number of non-zeroes per row */
  sparsity = (double)(4*radius+1)/(double)size2;

  /* compute total number of non-zeroes */
  nent = size2*stencil_size;

  /* each allocation size is checked for size_t overflow before use;
     %zu is the conforming conversion for size_t values */
  matrix_space = nent*sizeof(double);
  if (matrix_space/sizeof(double) != nent) {
    printf("ERROR: Cannot represent space for matrix: %zu\n", matrix_space);
    exit(EXIT_FAILURE);
  }
  matrix = (double *) malloc(matrix_space);
  if (!matrix) {
    printf("ERROR: Could not allocate space for sparse matrix: "FSTR64U"\n", nent);
    exit(EXIT_FAILURE);
  }

  vector_space = 2*size2*sizeof(double);
  if (vector_space/sizeof(double) != 2*size2) {
    printf("ERROR: Cannot represent space for vectors: %zu\n", vector_space);
    exit(EXIT_FAILURE);
  }
  vector = (double *) malloc(vector_space);
  if (!vector) {
    printf("ERROR: Could not allocate space for vectors: %d\n", (int)(2*size2));
    exit(EXIT_FAILURE);
  }
  /* input vector occupies the first half of the allocation, result the second */
  result = vector + size2;

  index_space = nent*sizeof(s64Int);
  if (index_space/sizeof(s64Int) != nent) {
    printf("ERROR: Cannot represent space for column indices: %zu\n", index_space);
    exit(EXIT_FAILURE);
  }
  colIndex = (s64Int *) malloc(index_space);
  if (!colIndex) {
    printf("ERROR: Could not allocate space for column indices: "FSTR64U"\n",
           nent*sizeof(s64Int));
    exit(EXIT_FAILURE);
  }

  /* "temp" added to the private list: each thread accumulates its own row
     dot product below; sharing it across threads is a data race */
  #pragma omp parallel private (row, col, elm, first, last, iter, temp)
  {

  #pragma omp master
  {
  nthread = omp_get_num_threads();
  printf("OpenMP Sparse matrix-vector multiplication\n");
  if (nthread != nthread_input) {
    num_error = 1;
    printf("ERROR: number of requested threads %d does not equal ",
           nthread_input);
    printf("number of spawned threads %d\n", nthread);
  }
  else {
    printf("Number of threads = %16d\n",nthread_input);
    printf("Matrix order = "FSTR64U"\n", size2);
    printf("Stencil diameter = %16d\n", 2*radius+1);
    printf("Sparsity = %16.10lf\n", sparsity);
    printf("Number of iterations = %16d\n", iterations);
#ifdef SCRAMBLE
    printf("Using scrambled indexing\n");
#else
    printf("Using canonical indexing\n");
#endif
  }
  }
  bail_out(num_error);

  /* initialize the input and result vectors */
  #pragma omp for
  for (row=0; row<size2; row++) result[row] = vector[row] = 0.0;

  /* fill matrix with nonzeroes corresponding to difference stencil. We use the
     scrambling for reordering the points in the grid.  */
  #pragma omp for private (i,j,r)
  for (row=0; row<size2; row++) {
    j = row/size; i=row%size;
    elm = row*stencil_size;
    colIndex[elm] = REVERSE(LIN(i,j),lsize2);
    for (r=1; r<=radius; r++, elm+=4) {
      colIndex[elm+1] = REVERSE(LIN((i+r)%size,j),lsize2);
      colIndex[elm+2] = REVERSE(LIN((i-r+size)%size,j),lsize2);
      colIndex[elm+3] = REVERSE(LIN(i,(j+r)%size),lsize2);
      colIndex[elm+4] = REVERSE(LIN(i,(j-r+size)%size),lsize2);
    }
    /* sort colIndex to make sure the compressed row accesses
       vector elements in increasing order */
    qsort(&(colIndex[row*stencil_size]), stencil_size, sizeof(s64Int), compare);
    for (elm=row*stencil_size; elm<(row+1)*stencil_size; elm++)
      matrix[elm] = 1.0/(double)(colIndex[elm]+1);
  }

  for (iter=0; iter<=iterations; iter++) {
    /* start timer after a warmup iteration */
    if (iter == 1) {
      #pragma omp barrier
      #pragma omp master
      {
        sparse_time = wtime();
      }
    }

    /* fill vector */
    #pragma omp for
    for (row=0; row<size2; row++) vector[row] += (double) (row+1);

    /* do the actual matrix-vector multiplication */
    #pragma omp for
    for (row=0; row<size2; row++) {
      first = stencil_size*row; last = first+stencil_size-1;
      #pragma simd reduction(+:temp)
      for (temp=0.0,col=first; col<=last; col++) {
        temp += matrix[col]*vector[colIndex[col]];
      }
      result[row] += temp;
    }
  } /* end of iterations */

  #pragma omp barrier
  #pragma omp master
  {
    sparse_time = wtime() - sparse_time;
  }

  } /* end of parallel region */

  /* verification test: matrix[elm] = 1/(colIndex[elm]+1) and vector[col]
     holds t*(col+1) during round t, so each of the nent entries contributes
     exactly t to the result sum in round t; summing over the warmup plus
     the timed rounds gives the closed form below */
  reference_sum = 0.5 * (double) nent * (double) (iterations+1) *
                        (double) (iterations +2);

  vector_sum = 0.0;
  for (row=0; row<size2; row++) vector_sum += result[row];
  if (ABS(vector_sum-reference_sum) > epsilon) {
    printf("ERROR: Vector sum = %lf, Reference vector sum = %lf\n",
           vector_sum, reference_sum);
    exit(EXIT_FAILURE);
  }
  else {
    printf("Solution validates\n");
#ifdef VERBOSE
    printf("Reference sum = %lf, vector sum = %lf\n",
           reference_sum, vector_sum);
#endif
  }

  avgtime = sparse_time/iterations;
  printf("Rate (MFlops/s): %lf Avg time (s): %lf\n",
         1.0E-06 * (2.0*nent)/avgtime, avgtime);

  exit(EXIT_SUCCESS);
}
/* Code below reverses bits in unsigned integer stored in a 64-bit word.
Bit reversal is with respect to the largest integer that is going to be
processed for the particular run of the code, to make sure the reversal
constitutes a true permutation. Hence, the final result needs to be shifted
to the right.
Example: if largest integer being processed is 0x000000ff = 255 =
0000...0011111111 (binary), then the unshifted reversal of 0x00000006 = 6 =
0000...0000000110 (binary) would be 011000000...0000 = 3*2^61, which is
outside the range of the original sequence 0-255. Setting shift_in_bits to
2log(256) = 8, the final result is shifted the the right by 64-8=56 bits,
so we get 000...0001100000 (binary) = 96, which is within the proper range */
u64Int reverse(register u64Int x, int shift_in_bits){
  /* Bit-reverse the 64-bit word by swapping progressively wider groups:
     adjacent bits, then 2-bit pairs, nibbles, bytes, half-words, and words.
     lo_mask[s] selects the low half of each 2^(s+1)-bit group. */
  static const u64Int lo_mask[6] = {
    0x5555555555555555, 0x3333333333333333, 0x0f0f0f0f0f0f0f0f,
    0x00ff00ff00ff00ff, 0x0000ffff0000ffff, 0x00000000ffffffff
  };
  int s;
  for (s = 0; s < 6; s++) {
    int shift = 1 << s;
    /* same swap as ((x>>shift)&lo) | ((x<<shift)&hi), since hi == lo<<shift */
    x = ((x >> shift) & lo_mask[s]) | ((x & lo_mask[s]) << shift);
  }
  /* shift right so the reversed value stays within the range implied by
     shift_in_bits (see the explanatory comment above this function) */
  return (x >> ((sizeof(u64Int)*BITS_IN_BYTE - shift_in_bits)));
}
/* qsort comparator for s64Int column indices: ascending order.
   Uses explicit comparisons rather than subtraction to avoid overflow. */
int compare(const void *el1, const void *el2) {
  const s64Int a = *(const s64Int *) el1;
  const s64Int b = *(const s64Int *) el2;
  if (a < b) return -1;
  if (a > b) return 1;
  return 0;
}
|
pi.c | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <mpi.h>
#ifdef _OPENMP
#include <omp.h>
#endif
double compute_pi(long n, int seed);
/*
 * MPI driver for a Monte-Carlo estimate of pi.  Rank 0 reads the total
 * number of samples from argv[1] (default 1000), divides it evenly over
 * the ranks, and broadcasts the per-rank count.  Each rank computes an
 * independent estimate; rank 0 reports the average of all estimates.
 */
int main(int argc, char *argv[]) {
    int rank, size;        /* MPI rank and communicator size */
    long n;                /* number of samples per rank */
    double pi, global_pi;  /* local estimate and reduced sum of estimates */
    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (rank == 0) {
        if (argc < 2) {
            n = 1000;
        } else {
            n = atol(argv[1]);
        }
        /* integer division: at most size-1 requested samples are dropped */
        n /= size;
    }
    /* n is only meaningful on rank 0 until this broadcast fills it everywhere */
    MPI_Bcast(&n, 1, MPI_LONG, 0, MPI_COMM_WORLD);
    /* the rank doubles as the per-process seed ingredient (see compute_pi) */
    pi = compute_pi(n, rank);
    printf("rank %d: %ld, %.5f\n", rank, n, pi);
    /* sum the per-rank estimates on rank 0 and report their mean */
    MPI_Reduce(&pi, &global_pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    if (rank == 0) {
        printf("pi = %.5f\n", global_pi/size);
    }
    MPI_Finalize();
    return EXIT_SUCCESS;
}
/*
 * Monte-Carlo estimate of pi: draw n uniform points in the unit square and
 * count those inside the quarter unit circle; the estimate is 4*count/n.
 * "rank" decorrelates the random streams of different MPI processes.
 * Note: n must be positive (n == 0 yields 0/0).
 */
double compute_pi(long n, int rank) {
    double count = 0.0;   /* samples that fell inside the circle */
    #pragma omp parallel shared(count, n, rank) default(none)
    {
        long i;
        struct timeval time;
        gettimeofday(&time, NULL);
        /* wall-clock-based seed, perturbed by rank so that ranks started at
           the same instant still draw different streams */
        int seed = (int) (time.tv_usec*(17*rank + 1) +
                          time.tv_sec/(rank + 1));
#ifdef _OPENMP
        /* additionally decorrelate the threads within this rank */
        seed += 17*omp_get_thread_num();
#endif
        #pragma omp for reduction(+:count)
        for (i = 0; i < n; i++) {
            double x = ((double) rand_r(&seed))/RAND_MAX;
            double y = ((double) rand_r(&seed))/RAND_MAX;
            if (x*x + y*y <= 1.0) {
                count += 1.0;
            }
        }
    }
    return 4.0*count/n;
}
|
GB_binop__rdiv_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_uint16)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__rdiv_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint16)
// A*D function (colscale): GB (_AxD__rdiv_uint16)
// D*A function (rowscale): GB (_DxB__rdiv_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint16)
// C=scalar+B GB (_bind1st__rdiv_uint16)
// C=scalar+B' GB (_bind1st_tran__rdiv_uint16)
// C=A+scalar GB (_bind2nd__rdiv_uint16)
// C=A'+scalar GB (_bind2nd_tran__rdiv_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 16)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IDIV_UNSIGNED (y, x, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_UINT16 || GxB_NO_RDIV_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B with all three matrices dense.  Per the comment above, the op is
// limited to MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV, and unlike
// the other kernels in this file there is no GB_DISABLE guard or return code.
void GB (_Cdense_ewise3_accum__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    // the loop body lives in the shared template, specialized by the
    // GB_* macros defined earlier in this file
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense (no accumulation into C).
// Returns GrB_NO_VALUE when this operator/type combination is compiled out
// via GB_DISABLE, signalling the caller to fall back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, with the work
// pre-sliced into B_ntasks tasks over B_nthreads threads (B_ek_slicing).
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_accumB__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into the
// dense matrix C.  Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
// The duplicated, unreachable "return (GrB_SUCCESS)" that followed the inner
// block in the generated code has been removed; behavior is unchanged.
GrB_Info GB (_Cdense_accumb__rdiv_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with A's work
// pre-sliced (A_ek_slicing / A_ntasks / A_nthreads).
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB (_AxD__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // expose C's value array with its concrete type so the template can
    // write uint16_t results directly
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB (_DxB__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // expose C's value array with its concrete type for the template
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, with optional mask M (structural and/or
// complemented per Mask_struct/Mask_comp) and a pre-built task list.
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB (_AaddB__rdiv_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slicing workspace for the template; GB_FREE_WORK (project macro)
    // releases it after the template runs
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with the rdiv_uint16
// operator, applied only where both A and B have entries.
GrB_Info GB (_AemultB_01__rdiv_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // all of the work is done by the included meta-template
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  The flipxy flag asks for f(y,x) instead of f(x,y); how it
// is honored depends on whether the operator has a flipped variant.
GrB_Info GB (_AemultB_02__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both
// A and B are bitmap/full.
GrB_Info GB (_AemultB_03__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // all of the work is done by the included template
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult when C is bitmap: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__rdiv_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // all of the work is done by the included template
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = rdiv (x, Bx): bind the scalar x as the first operand and apply the
// operator to every entry of B.  rdiv(x,y) computes y/x, so each output is
// GB_IDIV_UNSIGNED (Bx [p], x, 16).
GrB_Info GB (_bind1st__rdiv_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is not bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs/outputs
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    const uint16_t x = (*((uint16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only positions present in the bitmap are computed
        if (GBB (Bb, p))
        {
            uint16_t bval = Bx [p] ;
            Cx [p] = GB_IDIV_UNSIGNED (bval, x, 16) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = rdiv (Ax, y): bind the scalar y as the second operand and apply the
// operator to every entry of A.  rdiv(x,y) computes y/x, so each output is
// GB_IDIV_UNSIGNED (y, Ax [p], 16).
GrB_Info GB (_bind2nd__rdiv_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is not bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs/outputs
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    const uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only positions present in the bitmap are computed
        if (GBB (Ab, p))
        {
            uint16_t aval = Ax [p] ;
            Cx [p] = GB_IDIV_UNSIGNED (y, aval, 16) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP defines how one entry is computed inside GB_unop_transpose.c:
// cij = rdiv (x, aij) = aij / x, with no typecasting despite the macro name.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint16_t aij = Ax [pA] ; \
    Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 16) ; \
}

// C = rdiv (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__rdiv_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP defines how one entry is computed inside GB_unop_transpose.c:
// cij = rdiv (aij, y) = y / aij, with no typecasting despite the macro name.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint16_t aij = Ax [pA] ; \
    Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 16) ; \
}

// C = rdiv (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB (_bind2nd_tran__rdiv_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
adapter.h | /**
* Implementation of a lock-free b-slack tree using LLX/SCX.
* Trevor Brown, 2018.
*/
#ifndef DS_ADAPTER_H
#define DS_ADAPTER_H
#include <iostream>
#include "errors.h"
#ifdef USE_TREE_STATS
# define TREE_STATS_BYTES_AT_DEPTH
# include "tree_stats.h"
#endif
#include "bslack_impl.h"
#if !defined FAT_NODE_DEGREE
// #warning "FAT_NODE_DEGREE was not defined... using default: 16."
#define FAT_NODE_DEGREE 16
#endif
#define NODE_T bslack_ns::Node<FAT_NODE_DEGREE, K>
#define RECORD_MANAGER_T record_manager<Reclaim, Alloc, Pool, NODE_T>
#define DATA_STRUCTURE_T bslack_ns::bslack<FAT_NODE_DEGREE, K, std::less<K>, RECORD_MANAGER_T>
// Benchmark-harness adapter exposing the lock-free b-slack tree (bslack_impl.h)
// through the common setbench ds_adapter interface.  The tree stores all
// values in void * fields, so V must fit in a pointer (checked at runtime).
template <typename K, typename V, class Reclaim = reclaimer_debra<K>, class Alloc = allocator_new<K>, class Pool = pool_none<K>>
class ds_adapter {
private:
    DATA_STRUCTURE_T * const ds;    // the underlying b-slack tree instance

public:
    // KEY_ANY is the sentinel key the tree needs; unused1/2/3 are required
    // by the harness's uniform constructor signature but ignored here.
    ds_adapter(const int NUM_THREADS,
               const K& KEY_ANY,
               const K& unused1,
               const V& unused2,
               Random64 * const unused3)
    : ds(new DATA_STRUCTURE_T(NUM_THREADS, KEY_ANY))
    {
        // values are reinterpreted as void *, so V must not be wider
        if (sizeof(V) > sizeof(void *)) {
            setbench_error("Value type V is too large to fit in void *. This data structure stores all values in fields of type void *, so this is a problem.");
        }
        if (NUM_THREADS > MAX_THREADS_POW2) {
            setbench_error("NUM_THREADS exceeds MAX_THREADS_POW2");
        }
    }
    ~ds_adapter() {
        delete ds;
    }

    // sentinel value meaning "no value present"
    void * getNoValue() {
        return ds->NO_VALUE;
    }

    // per-thread setup/teardown for the record manager
    void initThread(const int tid) {
        ds->initThread(tid);
    }
    void deinitThread(const int tid) {
        ds->deinitThread(tid);
    }

    bool contains(const int tid, const K& key) {
        return ds->contains(tid, key);
    }
    // returns the previous value (or NO_VALUE), cast back to V
    V insert(const int tid, const K& key, const V& val) {
        return (V) ds->insert(tid, key, val);
    }
    V insertIfAbsent(const int tid, const K& key, const V& val) {
        return (V) ds->insertIfAbsent(tid, key, val);
    }
    V erase(const int tid, const K& key) {
        return (V) ds->erase(tid, key).first;
    }
    V find(const int tid, const K& key) {
        return (V) ds->find(tid, key).first;
    }
    int rangeQuery(const int tid, const K& lo, const K& hi, K * const resultKeys, V * const resultValues) {
        return ds->rangeQuery(tid, lo, hi, resultKeys, (void ** const) resultValues);
    }
    void printSummary() {
        ds->debugGetRecMgr()->printStatus();
    }
    bool validateStructure() {
        // no structural validation implemented for this adapter
        return true;
    }
    void printObjectSizes() {
        std::cout<<"size_node="<<(sizeof(NODE_T))<<std::endl;
    }
    // try to clean up: must only be called by a single thread as part of the test harness!
    void debugGCSingleThreaded() {
        ds->debugGetRecMgr()->debugGCSingleThreaded();
    }

#ifdef USE_TREE_STATS
    // Traversal callbacks used by tree_stats.h to compute shape statistics.
    class NodeHandler {
    public:
        typedef NODE_T * NodePtrType;
        K minKey;
        K maxKey;
        NodeHandler(const K& _minKey, const K& _maxKey) {
            minKey = _minKey;
            maxKey = _maxKey;
        }
        // iterates over the child pointers of an internal node
        class ChildIterator {
        private:
            size_t ix;
            NodePtrType node; // node being iterated over
        public:
            ChildIterator(NodePtrType _node) { node = _node; ix = 0; }
            bool hasNext() { return ix < node->size; }
            NodePtrType next() { return node->ptrs[ix++]; }
        };
        static bool isLeaf(NodePtrType node) { return node->leaf; }
        static ChildIterator getChildIterator(NodePtrType node) { return ChildIterator(node); }
        static size_t getNumChildren(NodePtrType node) { return node->size; }
        // only leaves hold keys in this tree
        static size_t getNumKeys(NodePtrType node) { return isLeaf(node) ? node->size : 0; }
        static size_t getSumOfKeys(NodePtrType node) {
            size_t sz = getNumKeys(node);
            size_t result = 0;
            for (size_t i=0;i<sz;++i) {
                result += (size_t) node->keys[i];
            }
            return result;
        }
        static size_t getSizeInBytes(NodePtrType node) { return sizeof(*node); }
    };
    TreeStats<NodeHandler> * createTreeStats(const K& _minKey, const K& _maxKey) {
        return new TreeStats<NodeHandler>(new NodeHandler(_minKey, _maxKey), ds->debug_getEntryPoint(), true);
    }
#endif

private:
    // Recursive traversal invoking callback(key, value, args...) on every
    // leaf entry.  At depth 4 subtrees are spawned as OpenMP tasks —
    // presumably chosen to balance task granularity; TODO confirm.
    template<typename... Arguments>
    void iterate_helper_fn(int depth, void (*callback)(K key, V value, Arguments... args)
                          , NODE_T * node, Arguments... args) {
        if (node == NULL) return;
        if (node->leaf) {
            for (int i=0;i<node->getABDegree();++i) {
                K key = node->keys[i];
                V val = (V) node->ptrs[i];
                callback(key, val, args...);
            }
            return;
        }
        for (int i=0;i<node->getABDegree();++i) {
            if (depth == 4) {
                #pragma omp task
                iterate_helper_fn(1+depth, callback, (NODE_T *) node->ptrs[i], args...);
            } else {
                iterate_helper_fn(1+depth, callback, (NODE_T *) node->ptrs[i], args...);
            }
        }
    }

public:
    // Parallel iteration over all (key, value) pairs.  NOTE(review): no
    // synchronization with concurrent updates — intended for quiescent use
    // by the harness; confirm before calling during a workload.
    #define DS_ADAPTER_SUPPORTS_TERMINAL_ITERATE
    template<typename... Arguments>
    void iterate(void (*callback)(K key, V value, Arguments... args), Arguments... args) {
        #pragma omp parallel
        {
            #pragma omp single
            iterate_helper_fn(0, callback, ds->debug_getEntryPoint(), args...);
        }
    }
};
#undef RECORD_MANAGER_T
#undef DATA_STRUCTURE_T
#undef FAT_NODE_DEGREE
#endif
|
omp_atomic.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#define DOUBLE_DIGITS 20 /* dt^DOUBLE_DIGITS */
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800 /* 10! */
// Exercise every supported form of "#pragma omp atomic" — sum, difference,
// product and division on int and double, ++/--, bitwise &, |, ^, and the
// shift operators — and verify each result against the expected value.
// Returns 1 if all sub-tests pass, 0 otherwise.
int test_omp_atomic()
{
  int sum;
  int diff;
  double dsum = 0;
  double dt = 0.5;                  /* base of geometric row for + and - test */
  double ddiff;
  int product;
  int x;
  int *logics;
  int bit_and = 1;
  int bit_or = 0;
  int exclusiv_bit_or = 0;
  int j;
  int known_sum;
  int known_diff;
  int known_product;
  int result = 0;                   /* counts failed sub-tests */
  double dknown_sum;
  double rounding_error = 1.E-9;    /* tolerance for floating-point checks */
  double dpt, div;
  int logicsArray[LOOPCOUNT];
  logics = logicsArray;

  sum = 0;
  diff = 0;
  product = 1;

  // sum of integers test
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 1; i <= LOOPCOUNT; i++) {
      #pragma omp atomic
      sum += i;
    }
  }
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
  if (known_sum != sum)
  {
    fprintf(stderr,
            "Error in sum with integers: Result was %d instead of %d.\n",
            sum, known_sum);
    result++;
  }

  // difference of integers test
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; i++) {
      #pragma omp atomic
      diff -= i;
    }
  }
  known_diff = ((LOOPCOUNT - 1) * LOOPCOUNT) / 2 * -1;
  if (diff != known_diff)
  {
    /* fixed: the message used to claim the expected value was 0 */
    fprintf (stderr,
             "Error in difference with integers: Result was %d instead of %d.\n",
             diff, known_diff);
    result++;
  }

  // sum of doubles test: sum the geometric series dt^1 .. dt^DOUBLE_DIGITS
  dsum = 0;
  dpt = 1;
  for (j = 0; j < DOUBLE_DIGITS; ++j) {
    dpt *= dt;
  }
  dknown_sum = (1 - dpt) / (1 -dt);
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < DOUBLE_DIGITS; ++i) {
      #pragma omp atomic
      dsum += pow (dt, i);
    }
  }
  if (dsum != dknown_sum && (fabs (dsum - dknown_sum) > rounding_error)) {
    fprintf (stderr, "Error in sum with doubles: Result was %f"
             " instead of: %f (Difference: %E)\n",
             dsum, dknown_sum, dsum - dknown_sum);
    result++;
  }

  // difference of doubles test: subtracting the series should leave ~0
  dpt = 1;
  for (j = 0; j < DOUBLE_DIGITS; ++j) {
    dpt *= dt;
  }
  ddiff = (1 - dpt) / (1 - dt);
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < DOUBLE_DIGITS; ++i) {
      #pragma omp atomic
      ddiff -= pow (dt, i);
    }
  }
  if (fabs (ddiff) > rounding_error) {
    fprintf (stderr,
             "Error in difference with doubles: Result was %E instead of 0.0\n",
             ddiff);
    result++;
  }

  // product of integers test: 1 * 2 * ... * MAX_FACTOR == MAX_FACTOR!
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 1; i <= MAX_FACTOR; i++) {
      #pragma omp atomic
      product *= i;
    }
  }
  known_product = KNOWN_PRODUCT;
  if (known_product != product) {
    fprintf (stderr,
             "Error in product with integers: Result was %d instead of %d\n",
             product, known_product);
    result++;
  }

  // division of integers test: dividing MAX_FACTOR! by 1..MAX_FACTOR gives 1
  product = KNOWN_PRODUCT;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 1; i <= MAX_FACTOR; ++i) {
      #pragma omp atomic
      product /= i;
    }
  }
  if (product != 1) {
    fprintf (stderr,
             "Error in product division with integers: Result was %d"
             " instead of 1\n",
             product);
    result++;
  }

  // division of doubles test
  div = 5.0E+5;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 1; i <= MAX_FACTOR; i++) {
      #pragma omp atomic
      div /= i;
    }
  }
  if (fabs(div-0.137787) >= 1.0E-4 ) {
    result++;
    fprintf (stderr, "Error in division with double: Result was %f"
             " instead of 0.137787\n", div);
  }

  // ++ test
  x = 0;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      x++;
    }
  }
  if (x != LOOPCOUNT) {
    result++;
    fprintf (stderr, "Error in ++\n");
  }

  // -- test (starts from the LOOPCOUNT produced by the ++ test)
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      x--;
    }
  }
  if (x != 0) {
    result++;
    fprintf (stderr, "Error in --\n");
  }

  // bit-and test part 1: all ones, AND must stay 1
  for (j = 0; j < LOOPCOUNT; ++j) {
    logics[j] = 1;
  }
  bit_and = 1;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      bit_and &= logics[i];
    }
  }
  if (!bit_and) {
    result++;
    fprintf (stderr, "Error in BIT AND part 1\n");
  }

  // bit-and test part 2: a single zero must clear the AND
  bit_and = 1;
  logics[LOOPCOUNT / 2] = 0;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      bit_and &= logics[i];
    }
  }
  if (bit_and) {
    result++;
    fprintf (stderr, "Error in BIT AND part 2\n");
  }

  // bit-or test part 1: all zeros, OR must stay 0
  for (j = 0; j < LOOPCOUNT; j++) {
    logics[j] = 0;
  }
  bit_or = 0;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      bit_or |= logics[i];
    }
  }
  if (bit_or) {
    result++;
    fprintf (stderr, "Error in BIT OR part 1\n");
  }

  // bit-or test part 2: a single one must set the OR
  bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      bit_or |= logics[i];
    }
  }
  if (!bit_or) {
    result++;
    fprintf (stderr, "Error in BIT OR part 2\n");
  }

  // bit-xor test part 1: all zeros, XOR must stay 0
  for (j = 0; j < LOOPCOUNT; j++) {
    logics[j] = 0;
  }
  exclusiv_bit_or = 0;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      exclusiv_bit_or ^= logics[i];
    }
  }
  if (exclusiv_bit_or) {
    result++;
    fprintf (stderr, "Error in EXCLUSIV BIT OR part 1\n");
  }

  // bit-xor test part 2: exactly one 1 must flip the XOR to 1
  exclusiv_bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      exclusiv_bit_or ^= logics[i];
    }
  }
  if (!exclusiv_bit_or) {
    result++;
    fprintf (stderr, "Error in EXCLUSIV BIT OR part 2\n");
  }

  // left shift test: 1 << 10 == 1024
  x = 1;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < 10; ++i) {
      #pragma omp atomic
      x <<= 1;
    }
  }
  if ( x != 1024) {
    result++;
    fprintf (stderr, "Error in <<\n");
    /* resync so the >> test below still starts from 1024 */
    x = 1024;
  }

  // right shift test: 1024 >> 10 == 1
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < 10; ++i) {
      #pragma omp atomic
      x >>= 1;
    }
  }
  if (x != 1) {
    result++;
    fprintf (stderr, "Error in >>\n");
  }

  return (result == 0);
} // test_omp_atomic()
// Run the atomic test suite REPETITIONS times; the exit status is the
// number of failed repetitions (0 means success).
int main()
{
  int num_failed = 0;
  for (int rep = 0; rep < REPETITIONS; rep++) {
    num_failed += !test_omp_atomic();
  }
  return num_failed;
}
|
openbsdsoftraid_fmt_plug.c | /*
* Copyright (c) 2014 Thiébaud Weksteen <thiebaud at weksteen dot fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Fixed BE issues, and build problems (Fall 2014), JimF.
*/
#include "arch.h"
#if FMT_EXTERNS_H
extern struct fmt_main fmt_openbsd_softraid;
#elif FMT_REGISTERS_H
john_register_one(&fmt_openbsd_softraid);
#else
#include "aes.h"
#include "hmac_sha.h"
#include "sha.h"
#include "common.h"
#include "formats.h"
#include "pbkdf2_hmac_sha1.h"
#include "loader.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "OpenBSD-SoftRAID"
#define FORMAT_NAME ""
#define FORMAT_TAG "$openbsd-softraid$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT " (8192 iterations)"
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define OPENBSD_SOFTRAID_SALTLENGTH 128
#define OPENBSD_SOFTRAID_KEYS 32
#define OPENBSD_SOFTRAID_KEYLENGTH 64 /* AES-XTS-256 keys are 512 bits long */
#define OPENBSD_SOFTRAID_MACLENGTH 20
#define BINARY_SIZE OPENBSD_SOFTRAID_MACLENGTH
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
static struct fmt_tests tests_openbsdsoftraid[] = {
// too long of line was causing my Sparc box to fail to compile this code
{"\
$openbsd-softraid$8192$c2891132ca5305d1189a7da94d32de29182abc2f56dc641d685e471935f2646e06b79f1d6c102c2f62f3757a20efb0a110b8ae207f9129f0dc5eea8ab05cc8280e0ba2460faf979dbac9f577c4a083349064364556b7ad15468c17c4d794c3da0ddf5990cc66751a6ded8d534531dd9aa9fce2f43e68d6a7200e135beb55e752$311c42d1d8daf1e47e0150c8d4a455a0567b062970c1838faaedcd3e43795545de64971c7598902a6e2c3fffcf8abe2ef78979164d0c9089fbb931c4c9dac8b86c85eeace11095e38487e41eb7b6094d96c339e86686121fbe1c32dbff3c00706926b22ec3a1329f346c599d132105b5d182a380161504d535f9836bb7286331adce1e47e4e251a0249612a94312bb309a6f4558568467731c1ae8c9b910d27102dca2a72228ffde7bfc60004c8ab33ca2b01aa476c4f42f99a3d1f904e3bbc56270edb314a62e92cf68185ace93731ef4ce08dff3c695c45e35b57ed8ab1552114635eb2ff531437ba5c3a08ebf3e73b6bbb7fe1ad98373da349f09284ae819b6a2f6fc5a10aec347f3c2331abc1d6617e77d68f314fdb683294f3ef351869491c4fb096969924215d711c15e5fce533dc5acaed4a473b14c595bababc178e62ef065770716520ecddc7cbf1cbed1250b7e004ab975bc29780c952087ec382bf6e77447720a10a8c2993262a2b21f8a3f47e35daa5b620573626b474d3e8abf8e73164664b041a18fe35c2a1905fad617bf6e6c380fdeeb680fa89b6c6dc7676ad93fde25076ecb8855d623b45af9a16a62a957d85c4c70896019be1827ad9320a69f18bdfc2674f04babdbfcd679c0ef22f7ab2a18818b9b425e61d8c06196a23babd0aefd5a00f1b297a66d973daae40f4dbd9be60d8953fafbd51f7745e2d04b5c80b63ad1f550cd939490b346d4fe7c1fc266d593bcafac0d8989994e174de6d1ef4ce78b3224ea4e68ccbf998654a067558537be332f5cae4b44c18664428d45b71cde5b53bedddf8a7daf47fce212578b72\
7e420c91de0baa1108683dd5b5534e81f4fe945d27fd9d28934afc8d15d95932952c0be717d4d87bb8255bf658a083c3aed643f7a6cfb56fbcbdab9e0a7348b0a3a91e3d560d1ec96f5769551e64beb54a499f6d6dd37e4361d484fe4f7bac4dc26c8a1a2609592d527b134c8212d71b3578217e0ec1da317c69e7e8c39d2d5b2d4073fa9c618a01a092b61613f6f1e41e6ab43d8ca010f177947aeab2884e9a4dd28453ff5bdadb765680733e7af1463ec1b20b879ae01c9256da0207811f956b3950f6db743a9e34a6d8f0fdfa5c47b4f807f0017c2092d72dc19d111711e796ffc4035da3a4caa6a5301491d0473b0d47cd01b705ff11a10263867013a11c65462c311fa5ac9a2598142779b55f09dbec89ac18049c29e5baf3aa38696a3b92d08b02cb10af5389e06058b3ad8be09b121e4e320520413775b7c6fbb3f2b332e3ac0295a4a4dfb4a56ea1c32bc28c149ffaa3b426f5a17a11afe56426b38966c86734654fe05a611c8f025ee4092656c097bbf59743c31508fa9e80ff86a2ae33d401ec316e65eef251d173e9565ffc1672b8b341174427a851a6a4c42554848c637283d13d4ba5b5414b4e61ade6ec7ef7b77186a81adff381e6a79d3dac2c68bf386f100fef1c354221a2ba3d8a7a10460f637eaa152ab79027ab94e5965660de3ed66dac4a0f8e75b85d768e51c8e82a26cb81249ca8d249d8c5cdc8bd55289679d3915a397d31863334df18e2fe3ef9069b064c4ef6b418e5388817040ae9922e5e9f57a8bf3b3fe04748b9cf5068ac86f942b4068853602a6c6c794423569b665b359d5f947c2e5ff194d23d953b435b2b3834513fdfda2b66fcea22883690b1cc56c2fcaa5600895ff8d8ae9e3a6a2b6258ff873242d1128b20e7d1e843ade1bd206b541eba02a214a95cd83860865f947cb4adbd465957055060df05e53fa9ea4b29867c92b224be939d3715be0e61b7aa0e24a8f25bccfa3b7901a3f0a8cb25498d7c9899d435b409220723dcde1d38ab6d4e7cfb42d443c9b65a37\
53891f46adb9bc52574699a7b642955702ed662d04cbe21aeec7c15db7e325dcaa74c85c5e3ed54424642d5bd8d3109c2d4c0079b3d2c5f2da12ad5b25407ae48f6fe4fc653b23a7f2d56a93c898dd0bd59ba02295934c9f7ffb433ef611d51b7c203f374cf9e8b69d4952ccc44593447ad41540270b0e30c349401048cbce10a0e1bae373de15c878982b0af837fb5432cd2471516d1e218296ce462a59fd5412921bbd3f75cf65070f7bafe21105ba83f7ffe8ece71534863c0dd731a2f3c29fff97b8ce798890a1b158a8891bb6f2dd751e75c0cb0db7ea152d7cdc91663f46f85d12ce0015351dba5225b2a87b64cc30518b23e31b2bfbb0b2a5042eeaea1234a57549a3e55ddd708e3380df032e93071b10b3e6902152c90ffd99bda0177a197779341307c5d9f335e698259ade70564eab9d2856aa1aa814211e71ba2885ef9cd5f5bdd225af2f6eebf775cc0bbdb3e519edb7c49a9a1984cc0cc012679aca8fd1d002fa64b2df095b4a9e2b496e3f4b544955c817efb29562cf8b3d2eeccbe4d364ce71d2d12b504b11de4747139ef505bdd12f382eb02fa3f5272b710644a9c20660ca5b4fa74be60984240b555c1f34261ee1d72d9eb2cc680f32b4603865503addc3a1fdc49d2b158d3407a282edd72ef51ad021338fdebf413726e1778e3bc3909b670d3f40e824391c5525b162ea01c29205e12f8e62bdd8cd0f21f6f7b44af4521c2dd23a7f3508e5dc6fffa3365e4ca1cac33bb515a5c5495dc059a94396de7d802758b65bb4cecb90bf69ab4126eab85958cb8b64eedf3a0955ab42cdc98ef90620e10cc854b9c02bfaff60742494a0c3bb34ef6d6bb861b275d975bdc4a10ac922dc70c1b03a4c01943a704af36ec8d79cf2f9ce0f602f01bef4a32edeb8fbba863c945552efc814410ac6bb839349ea65879644003bdda35d40eabdc9dcfb2d67d945b7f111ab62591763a0dd2d338594eff004237e5acce69dd9d2cdbb9ce121bd$5337e4ba9d877a1e84559688386fbc844c5fe557", "password1" },
{NULL}
};
// per-candidate plaintext buffers, allocated in init() (NUL-terminated)
static char (*key_buffer)[PLAINTEXT_LENGTH + 1];
// per-candidate HMAC-SHA1 results produced by crypt_all()
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
// parsed salt: PBKDF2 parameters plus the AES-encrypted sector-key blob
static struct custom_salt {
	unsigned int num_iterations;    /* PBKDF2-HMAC-SHA1 iteration count */
	unsigned char salt[OPENBSD_SOFTRAID_SALTLENGTH];
	unsigned char masked_keys[OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS];
} *cur_salt;
// One-time format setup: scale the keys-per-crypt parameters by the OpenMP
// thread count and allocate the candidate/result buffers.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	// note the order: min is scaled by the raw thread count only,
	// max additionally by OMP_SCALE
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	key_buffer = mem_calloc(sizeof(*key_buffer), self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}
// Release the buffers allocated by init().
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(key_buffer);
}
// Validate one ciphertext line: tag, decimal iteration count, 128-byte salt,
// 32*64-byte masked-key blob, and 20-byte HMAC-SHA1, all '$'-separated and
// lowercase hex.  Returns 1 if the line is well formed, 0 otherwise.
static int valid(char* ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	// work on a copy: strtokm modifies its input in place
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "$")) == NULL)
		goto err;
	if (!isdec(p))      /* iterations */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)
		goto err;
	if (strlen(p) != 2 * 128)   /* salt */
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)
		goto err;
	if (strlen(p) != 2 * 32 * 64)   /* masked keys */
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)
		goto err;
	if (strlen(p) != 2 * BINARY_SIZE)   /* HMAC-SHA1 */
		goto err;
	if (!ishexlc(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	// single cleanup point for the strdup'd copy on any parse failure
	MEM_FREE(keeptr);
	return 0;
}
// Install the salt selected by the cracker core; crypt_all() reads it
// through the cur_salt pointer.
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *) salt;
}
// Parse a (already validated) ciphertext into the binary salt structure:
// iteration count, raw salt bytes, and the AES-encrypted key blob.
// Returns a pointer to static storage, per the JtR salt convention.
static void* get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	ctcopy += FORMAT_TAG_LEN;
	p = strtokm(ctcopy, "$"); /* iterations */
	cs.num_iterations = atoi(p);
	p = strtokm(NULL, "$"); /* salt */
	// decode lowercase hex pairs into bytes
	for (i = 0; i < OPENBSD_SOFTRAID_SALTLENGTH ; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$"); /* masked keys */
	for (i = 0; i < OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS; i++)
		cs.masked_keys[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
// Decode the final '$'-separated field (the 20-byte HMAC-SHA1, hex-encoded)
// into a static, suitably aligned byte buffer.
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;    /* forces word alignment of c[] */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	// the HMAC is the last '$'-separated field
	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}
// For each candidate password: derive a 256-bit masking key via
// PBKDF2-HMAC-SHA1, AES-decrypt the masked sector keys, then compute
// HMAC-SHA1(SHA1(mask_key), unmasked_keys) into crypt_out for comparison.
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		AES_KEY akey;
		unsigned char mask_key[MAX_KEYS_PER_CRYPT][32];
		unsigned char unmasked_keys[OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS];
		unsigned char hashed_mask_key[20];
		int i, j;

		/* derive masking key from password */
#ifdef SSE_GROUP_SZ_SHA1
		// SIMD path: derive SSE_GROUP_SZ_SHA1 masking keys at once
		int lens[SSE_GROUP_SZ_SHA1];
		unsigned char *pin[SSE_GROUP_SZ_SHA1], *pout[SSE_GROUP_SZ_SHA1];
		for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
			lens[i] = strlen(key_buffer[index+i]);
			pin[i] = (unsigned char*)key_buffer[index+i];
			pout[i] = mask_key[i];
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, lens,
				cur_salt->salt, OPENBSD_SOFTRAID_SALTLENGTH,
				cur_salt->num_iterations, (unsigned char**)pout,
				32, 0);
#else
		// scalar path: one masking key per call
		pbkdf2_sha1((const unsigned char*)(key_buffer[index]),
				strlen(key_buffer[index]),
				cur_salt->salt, OPENBSD_SOFTRAID_SALTLENGTH,
				cur_salt->num_iterations, mask_key[0],
				32, 0);
#endif
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			/* decrypt sector keys */
			AES_set_decrypt_key(mask_key[i], 256, &akey);
			// the blob is a multiple of the 16-byte AES block size
			for(j = 0; j < (OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS) / 16; j++) {
				AES_decrypt(&cur_salt->masked_keys[16*j], &unmasked_keys[16*j], &akey);
			}
			/* get SHA1 of mask_key */
			SHA1(mask_key[i], 32, hashed_mask_key);
			// the HMAC over the unmasked keys is what the header stores
			hmac_sha1(hashed_mask_key, OPENBSD_SOFTRAID_MACLENGTH,
					unmasked_keys, OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS,
					(unsigned char*)crypt_out[index+i], 20);
		}
	}
	return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (*(ARCH_WORD_32*)binary == *(ARCH_WORD_32*)(crypt_out[index]))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return (*(ARCH_WORD_32*)binary == *(ARCH_WORD_32*)(crypt_out[index]));
}
// Full 160-bit comparison of the computed MAC against the binary decoded
// from the ciphertext.
static int cmp_exact(char *source, int index)
{
	return memcmp(get_binary(source), crypt_out[index], 20) == 0;
}
// Store one candidate password for the given index.
// Fixed: the original used an unbounded strcpy into the fixed-size
// key_buffer slot; use a bounded copy and force NUL-termination so an
// over-long candidate can never overflow the buffer.
static void jtr_set_key(char* key, int index)
{
	const size_t cap = sizeof(key_buffer[index]);   /* PLAINTEXT_LENGTH + 1 */
	strncpy(key_buffer[index], key, cap - 1);
	key_buffer[index][cap - 1] = 0;
}
// Return the stored candidate password for the given index.
static char *get_key(int index)
{
	return key_buffer[index];
}
/* Expose the PBKDF2 iteration count as this format's tunable cost metric. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *cs = (struct custom_salt *) salt;
	return cs->num_iterations;
}
// Format descriptor registered with the John core: parameters block first,
// then the method table wiring up the functions defined above.
struct fmt_main fmt_openbsd_softraid = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",   /* reported by iteration_count() below */
		},
		{ FORMAT_TAG },
		tests_openbsdsoftraid
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		jtr_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif
|
GB_unop__lnot_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lnot_uint32_uint32
// op(A') function: GB_unop_tran__lnot_uint32_uint32
// C type: uint32_t
// A type: uint32_t
// cast: uint32_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = lnot (Ax): logical NOT applied entrywise; the result is 1 when the
// entry is zero and 0 otherwise (no typecasting, both sides are uint32_t).
GrB_Info GB_unop_apply__lnot_uint32_uint32
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse, hypersparse, or full: every position holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (Ax [p] == 0) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = (Ax [p] == 0) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (A'): transpose A, typecast, and apply logical NOT.  The actual
// loops live in the shared template GB_unop_transpose.c, which is driven by
// the GB_* macros defined above.  Auto-generated; do not hand-edit.
GrB_Info GB_unop_tran__lnot_uint32_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces, // per-thread workspaces -- TODO confirm layout against GB_unop_transpose.c
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hello.c | // Load the OpenMP functions library
#include <omp.h>
#include <stdio.h>  // printf
// Print a hello message with the calling thread's OpenMP id, and (on the
// master thread) the team size.
int main()
{
int nthreads, tid;
// Fork a team of threads, with private versions of the declared variables.
// #pragma omp parallel private(nthreads, tid)
// {
// Get the thread number and print it
// NOTE(review): with the parallel region above commented out this runs
// serially, so omp_get_thread_num() returns 0 and the team size is 1.
tid = omp_get_thread_num();
printf("Hello World from thread number %d\n", tid);
// Only the master thread does the following
// #pragma omp barrier
// #pragma omp master
// {
if (tid == 0)
{
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
// }
// End of pragma, disband all but the master thread
//}
return 0;
}
|
conv_im2col_sgemm_neon_im2col.h | // Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
// im2col: unfold bottom_blob (inch channels of h x w) into column-matrix
// form so the convolution can later be computed as a single GEMM.
// NOTE(review): bottom_im2col is a shallow copy of top_blob, so the unfolded
// data (kernel_h*kernel_w*outw*outh floats per input channel) is written
// straight into top_blob's buffer -- the caller must have sized top_blob
// accordingly; verify against the call site.
static void conv_im2col_sgemm_neon_im2col(const Mat &bottom_blob, Mat &top_blob,
const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt,
int w, int inch, int outw, int outh, int outch)
{
//size_t elemsize = bottom_blob.elemsize;
Mat bottom_im2col = top_blob;
{
// floats produced per input channel
const int stride = kernel_h*kernel_w*outw*outh;
float* ret = (float*)bottom_im2col;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<inch; p++)
{
const float* input = bottom_blob.channel(p);
// each channel writes its own disjoint slice, so the loop is race-free
int retID = stride * p;
for (int u=0; u<kernel_h; u++)
{
for (int v=0; v<kernel_w; v++)
{
for (int i=0; i<outh; i++)
{
for (int j=0; j<outw; j++)
{
// source pixel for output (i,j) at kernel offset (u,v)
int row = u + i * stride_h;
int col = v + j * stride_w;
int index = row * w + col;
ret[retID] = input[index];
retID++;
}
}
}
}
}
}
}
}
|
convolution_sgemm.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __AVX__
// Repack raw kernel weights for the AVX SGEMM micro-kernels.
// Output channels are grouped 8-at-a-time (weights interleaved), then
// 4-at-a-time, then singly; kernel_tm's channel index mirrors the
// p/8 + (p%8)/4 + p%4 lookup used by conv_im2col_sgemm_sse.
static void conv_im2col_sgemm_transform_kernel_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
    const float* kernel = _kernel;
    // kernel memory packed 8 x 8
    kernel_tm.create(8*kernel_size, inch, outch/8 + (outch%8)/4 + outch%4);
    int nn_outch = outch >> 3;
    int remain_outch_start = nn_outch << 3;
    // groups of 8 output channels: interleave the weights of k0..k7
    for (int pp=0; pp<nn_outch; pp++)
    {
        const int p = pp * 8;
        const float* k[8];
        for (int t=0; t<8; t++)
            k[t] = kernel + (p+t)*inch*kernel_size;
        float* ktmp = kernel_tm.channel(p/8);
        for (int q=0; q<inch*kernel_size; q++)
        {
            for (int t=0; t<8; t++)
                *ktmp++ = *k[t]++;
        }
    }
    // groups of 4 of the remaining output channels: interleave k0..k3
    nn_outch = (outch - remain_outch_start) >> 2;
    for (int pp=0; pp<nn_outch; pp++)
    {
        const int p = remain_outch_start + pp * 4;
        const float* k[4];
        for (int t=0; t<4; t++)
            k[t] = kernel + (p+t)*inch*kernel_size;
        float* ktmp = kernel_tm.channel(p/8 + (p%8)/4);
        for (int q=0; q<inch*kernel_size; q++)
        {
            for (int t=0; t<4; t++)
                *ktmp++ = *k[t]++;
        }
    }
    remain_outch_start += nn_outch << 2;
    // leftover output channels are copied verbatim, one at a time
    for (int p=remain_outch_start; p<outch; p++)
    {
        const float* k0 = kernel + p*inch*kernel_size;
        float* ktmp = kernel_tm.channel(p/8 + (p%8)/4 + p%4);
        for (int q=0; q<inch*kernel_size; q++)
            *ktmp++ = *k0++;
    }
}
// Convolution computed as im2col + packed SGEMM (AVX build).
// Stages: (1) im2col-unfold bottom_blob; (2) repack the unfolded matrix into
// 8-wide column tiles; (3) multiply by the pre-packed kernel (see
// conv_im2col_sgemm_transform_kernel_sse), adding the bias, writing results
// straight into top_blob.  Output channels are processed 8, then 4, then 1
// at a time, matching the kernel packing layout.
static void conv_im2col_sgemm_sse(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, const Mat& _bias, \
const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias; // may be a null pointer when the layer has no bias
// im2col
Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, elemsize, opt.workspace_allocator);
{
const int stride = kernel_h*kernel_w*outw*outh; // floats per input channel
float* ret = (float*)bottom_im2col;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<inch; p++)
{
const float* input = bottom_blob.channel(p);
int retID = stride * p; // each channel writes a disjoint slice
for (int u=0; u<kernel_h; u++)
{
for (int v=0; v<kernel_w; v++)
{
for (int i=0; i<outh; i++)
{
for (int j=0; j<outw; j++)
{
int row = u + i * stride_h;
int col = v + j * stride_w;
int index = row * w + col;
ret[retID] = input[index];
retID++;
}
}
}
}
}
}
int kernel_size = kernel_w * kernel_h;
int out_size = outw * outh;
// bottom_im2col memory packed 8 x 8
// (channel count out_size/8 + out_size%8 over-allocates slightly, but
// matches the i/8 + i%8 channel indexing used for the remainder columns)
Mat bottom_tm(8*kernel_size, inch, out_size/8 + out_size%8, elemsize, opt.workspace_allocator);
{
int nn_size = out_size >> 3;
int remain_size_start = nn_size << 3;
// pack full groups of 8 output positions (columns) into 8-wide tiles
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 8;
const float* img0 = bottom_im2col.channel(0);
img0 += i;
float* tmpptr = bottom_tm.channel(i/8);
for (int q=0; q<inch*kernel_size; q++)
{
#if __AVX__
_mm256_storeu_ps(tmpptr, _mm256_loadu_ps(img0));
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
#endif // __AVX__
tmpptr += 8;
img0 += out_size;
}
}
// pack leftover columns one at a time
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<out_size; i++)
{
const float* img0 = bottom_im2col.channel(0);
img0 += i;
float* tmpptr = bottom_tm.channel(i/8 + i%8);
for (int q=0; q<inch*kernel_size; q++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += out_size;
}
}
}
// sgemm(int M, int N, int L, float* A, float* B, float* C)
{
//int M = outch; // outch
int N = outw * outh; // outsize or out stride
int L = kernel_w * kernel_h * inch; // ksize * inch
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
// ---- 8 output channels x 8 columns micro-kernel ----
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int i = pp * 8;
float* output0 = top_blob.channel(i);
float* output1 = top_blob.channel(i+1);
float* output2 = top_blob.channel(i+2);
float* output3 = top_blob.channel(i+3);
float* output4 = top_blob.channel(i+4);
float* output5 = top_blob.channel(i+5);
float* output6 = top_blob.channel(i+6);
float* output7 = top_blob.channel(i+7);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + i : zeros;
int j=0;
for (; j+7<N; j=j+8)
{
const float* vb = bottom_tm.channel(j/8);
const float* va = kernel_tm.channel(i/8);
#if __AVX__
__m256 _sum0 = _mm256_broadcast_ss(biasptr);
__m256 _sum1 = _mm256_broadcast_ss(biasptr+1);
__m256 _sum2 = _mm256_broadcast_ss(biasptr+2);
__m256 _sum3 = _mm256_broadcast_ss(biasptr+3);
__m256 _sum4 = _mm256_broadcast_ss(biasptr+4);
__m256 _sum5 = _mm256_broadcast_ss(biasptr+5);
__m256 _sum6 = _mm256_broadcast_ss(biasptr+6);
__m256 _sum7 = _mm256_broadcast_ss(biasptr+7);
int k=0;
// unrolled 4 reduction steps per iteration
for (; k+3<L; k=k+4)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va+1);
__m256 _va2 = _mm256_broadcast_ss(va+2);
__m256 _va3 = _mm256_broadcast_ss(va+3);
__m256 _vb0 = _mm256_loadu_ps(vb);
__m256 _vb1 = _mm256_loadu_ps(vb+8);
__m256 _vb2 = _mm256_loadu_ps(vb+16);
__m256 _vb3 = _mm256_loadu_ps(vb+24);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
_va0 = _mm256_broadcast_ss(va+4);
_va1 = _mm256_broadcast_ss(va+5);
_va2 = _mm256_broadcast_ss(va+6);
_va3 = _mm256_broadcast_ss(va+7);
_sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); // sum4 = (a00-a07) * k40
_sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); // sum5 = (a00-a07) * k50
_sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); // sum6 = (a00-a07) * k60
_sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); // sum7 = (a00-a07) * k70
va += 8;
// k1
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va+1);
_va2 = _mm256_broadcast_ss(va+2);
_va3 = _mm256_broadcast_ss(va+3);
_sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01
_sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11
_sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21
_sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31
_va0 = _mm256_broadcast_ss(va+4);
_va1 = _mm256_broadcast_ss(va+5);
_va2 = _mm256_broadcast_ss(va+6);
_va3 = _mm256_broadcast_ss(va+7);
_sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); // sum4 += (a10-a17) * k41
_sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); // sum5 += (a10-a17) * k51
_sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); // sum6 += (a10-a17) * k61
_sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); // sum7 += (a10-a17) * k71
va += 8;
// k2
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va+1);
_va2 = _mm256_broadcast_ss(va+2);
_va3 = _mm256_broadcast_ss(va+3);
_sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02
_sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12
_sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22
_sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32
_va0 = _mm256_broadcast_ss(va+4);
_va1 = _mm256_broadcast_ss(va+5);
_va2 = _mm256_broadcast_ss(va+6);
_va3 = _mm256_broadcast_ss(va+7);
_sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); // sum4 += (a20-a27) * k42
_sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); // sum5 += (a20-a27) * k52
_sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); // sum6 += (a20-a27) * k62
_sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7); // sum7 += (a20-a27) * k72
va += 8;
// k3
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va+1);
_va2 = _mm256_broadcast_ss(va+2);
_va3 = _mm256_broadcast_ss(va+3);
_sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03
_sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13
_sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23
_sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33
_va0 = _mm256_broadcast_ss(va+4);
_va1 = _mm256_broadcast_ss(va+5);
_va2 = _mm256_broadcast_ss(va+6);
_va3 = _mm256_broadcast_ss(va+7);
_sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); // sum4 += (a30-a37) * k43
_sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); // sum5 += (a30-a37) * k53
_sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6); // sum6 += (a30-a37) * k63
_sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); // sum7 += (a30-a37) * k73
va += 8;
vb += 32;
}
// remaining reduction steps, one at a time
for (; k<L; k++)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va+1);
__m256 _va2 = _mm256_broadcast_ss(va+2);
__m256 _va3 = _mm256_broadcast_ss(va+3);
__m256 _va4 = _mm256_broadcast_ss(va+4);
__m256 _va5 = _mm256_broadcast_ss(va+5);
__m256 _va6 = _mm256_broadcast_ss(va+6);
__m256 _va7 = _mm256_broadcast_ss(va+7);
__m256 _vb0 = _mm256_loadu_ps(vb);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
_sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); // sum4 = (a00-a07) * k40
_sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); // sum5 = (a00-a07) * k50
_sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); // sum6 = (a00-a07) * k60
_sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); // sum7 = (a00-a07) * k70
va += 8;
vb += 8;
}
_mm256_storeu_ps(output0, _sum0);
_mm256_storeu_ps(output1, _sum1);
_mm256_storeu_ps(output2, _sum2);
_mm256_storeu_ps(output3, _sum3);
_mm256_storeu_ps(output4, _sum4);
_mm256_storeu_ps(output5, _sum5);
_mm256_storeu_ps(output6, _sum6);
_mm256_storeu_ps(output7, _sum7);
#else
// NOTE(review): this scalar path is unreachable in this translation --
// the whole block is compiled under the file-level #if __AVX__ guard;
// it mirrors the non-AVX build and is kept for symmetry.
float sum0[8] = {0};
float sum1[8] = {0};
float sum2[8] = {0};
float sum3[8] = {0};
float sum4[8] = {0};
float sum5[8] = {0};
float sum6[8] = {0};
float sum7[8] = {0};
int k=0;
for (; k+7<L; k=k+8)
{
for (int n=0; n<8; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
sum4[n] += va[4] * vb[n];
sum5[n] += va[5] * vb[n];
sum6[n] += va[6] * vb[n];
sum7[n] += va[7] * vb[n];
va += 8;
sum0[n] += va[0] * vb[n+8];
sum1[n] += va[1] * vb[n+8];
sum2[n] += va[2] * vb[n+8];
sum3[n] += va[3] * vb[n+8];
sum4[n] += va[4] * vb[n+8];
sum5[n] += va[5] * vb[n+8];
sum6[n] += va[6] * vb[n+8];
sum7[n] += va[7] * vb[n+8];
va += 8;
sum0[n] += va[0] * vb[n+16];
sum1[n] += va[1] * vb[n+16];
sum2[n] += va[2] * vb[n+16];
sum3[n] += va[3] * vb[n+16];
sum4[n] += va[4] * vb[n+16];
sum5[n] += va[5] * vb[n+16];
sum6[n] += va[6] * vb[n+16];
sum7[n] += va[7] * vb[n+16];
va += 8;
sum0[n] += va[0] * vb[n+24];
sum1[n] += va[1] * vb[n+24];
sum2[n] += va[2] * vb[n+24];
sum3[n] += va[3] * vb[n+24];
sum4[n] += va[4] * vb[n+24];
sum5[n] += va[5] * vb[n+24];
sum6[n] += va[6] * vb[n+24];
sum7[n] += va[7] * vb[n+24];
va += 8;
sum0[n] += va[0] * vb[n+32];
sum1[n] += va[1] * vb[n+32];
sum2[n] += va[2] * vb[n+32];
sum3[n] += va[3] * vb[n+32];
sum4[n] += va[4] * vb[n+32];
sum5[n] += va[5] * vb[n+32];
sum6[n] += va[6] * vb[n+32];
sum7[n] += va[7] * vb[n+32];
va += 8;
sum0[n] += va[0] * vb[n+40];
sum1[n] += va[1] * vb[n+40];
sum2[n] += va[2] * vb[n+40];
sum3[n] += va[3] * vb[n+40];
sum4[n] += va[4] * vb[n+40];
sum5[n] += va[5] * vb[n+40];
sum6[n] += va[6] * vb[n+40];
sum7[n] += va[7] * vb[n+40];
va += 8;
sum0[n] += va[0] * vb[n+48];
sum1[n] += va[1] * vb[n+48];
sum2[n] += va[2] * vb[n+48];
sum3[n] += va[3] * vb[n+48];
sum4[n] += va[4] * vb[n+48];
sum5[n] += va[5] * vb[n+48];
sum6[n] += va[6] * vb[n+48];
sum7[n] += va[7] * vb[n+48];
va += 8;
sum0[n] += va[0] * vb[n+56];
sum1[n] += va[1] * vb[n+56];
sum2[n] += va[2] * vb[n+56];
sum3[n] += va[3] * vb[n+56];
sum4[n] += va[4] * vb[n+56];
sum5[n] += va[5] * vb[n+56];
sum6[n] += va[6] * vb[n+56];
sum7[n] += va[7] * vb[n+56];
va -= 56; // rewind so the next n reuses the same 8 k-steps
}
va += 64;
vb += 64;
}
for (; k<L; k++)
{
for (int n=0; n<8; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
sum4[n] += va[4] * vb[n];
sum5[n] += va[5] * vb[n];
sum6[n] += va[6] * vb[n];
sum7[n] += va[7] * vb[n];
}
va += 8;
vb += 8;
}
for (int n=0; n<8; n++)
{
output0[n] = sum0[n] + biasptr[0];
output1[n] = sum1[n] + biasptr[1];
output2[n] = sum2[n] + biasptr[2];
output3[n] = sum3[n] + biasptr[3];
output4[n] = sum4[n] + biasptr[4];
output5[n] = sum5[n] + biasptr[5];
output6[n] = sum6[n] + biasptr[6];
output7[n] = sum7[n] + biasptr[7];
}
#endif // __AVX__
output0 += 8;
output1 += 8;
output2 += 8;
output3 += 8;
output4 += 8;
output5 += 8;
output6 += 8;
output7 += 8;
}
// remaining columns, one output position at a time
for (; j<N; j++)
{
const float* vb = bottom_tm.channel(j/8 + j%8);
const float* va = kernel_tm.channel(i/8);
#if __AVX__
__m256 _sum0_7 = _mm256_loadu_ps(biasptr);
__m256 _sum0 = _mm256_set1_ps(0.0);
__m256 _sum1 = _mm256_set1_ps(0.0);
__m256 _sum2 = _mm256_set1_ps(0.0);
__m256 _sum3 = _mm256_set1_ps(0.0);
int k=0;
for (; k+3<L; k=k+4)
{
__m256 _vb0 = _mm256_broadcast_ss(vb);
__m256 _vb1 = _mm256_broadcast_ss(vb+1);
__m256 _vb2 = _mm256_broadcast_ss(vb+2);
__m256 _vb3 = _mm256_broadcast_ss(vb+3);
__m256 _va0 = _mm256_loadu_ps(va);
__m256 _va1 = _mm256_loadu_ps(va+8);
__m256 _va2 = _mm256_loadu_ps(va+16);
__m256 _va3 = _mm256_loadu_ps(va+24);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);// sum0 += (k00-k70) * a00
_sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1);// sum1 += (k01-k71) * a10
_sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2);// sum2 += (k02-k72) * a20
_sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3);// sum3 += (k03-k73) * a30
va += 32;
vb += 4;
}
_sum0 = _mm256_add_ps(_sum0, _sum1);
_sum2 = _mm256_add_ps(_sum2, _sum3);
_sum0_7 = _mm256_add_ps(_sum0_7, _sum0);
_sum0_7 = _mm256_add_ps(_sum0_7, _sum2);
for (; k<L; k++)
{
__m256 _vb0 = _mm256_broadcast_ss(vb);
__m256 _va = _mm256_loadu_ps(va);
_sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7);// sum0 += (k00-k70) * a00
va += 8;
vb += 1;
}
// scatter the 8 lanes to the 8 output channels
// (vector subscripting is a GCC/Clang extension)
output0[0] = _sum0_7[0];
output1[0] = _sum0_7[1];
output2[0] = _sum0_7[2];
output3[0] = _sum0_7[3];
output4[0] = _sum0_7[4];
output5[0] = _sum0_7[5];
output6[0] = _sum0_7[6];
output7[0] = _sum0_7[7];
#else
float sum0 = biasptr[0];
float sum1 = biasptr[1];
float sum2 = biasptr[2];
float sum3 = biasptr[3];
float sum4 = biasptr[4];
float sum5 = biasptr[5];
float sum6 = biasptr[6];
float sum7 = biasptr[7];
for (int k=0; k<L; k++)
{
sum0 += va[0] * vb[0];
sum1 += va[1] * vb[0];
sum2 += va[2] * vb[0];
sum3 += va[3] * vb[0];
sum4 += va[4] * vb[0];
sum5 += va[5] * vb[0];
sum6 += va[6] * vb[0];
sum7 += va[7] * vb[0];
va += 8;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
output4[0] = sum4;
output5[0] = sum5;
output6[0] = sum6;
output7[0] = sum7;
#endif // __AVX__
output0++;
output1++;
output2++;
output3++;
output4++;
output5++;
output6++;
output7++;
}
}
// ---- 4 output channels x 8 columns micro-kernel ----
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int i = remain_outch_start + pp * 4;
float* output0 = top_blob.channel(i);
float* output1 = top_blob.channel(i+1);
float* output2 = top_blob.channel(i+2);
float* output3 = top_blob.channel(i+3);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + i : zeros;
int j=0;
for (; j+7<N; j=j+8)
{
const float* vb = bottom_tm.channel(j/8);
const float* va = kernel_tm.channel(i/8 + (i%8)/4);
#if __AVX__
__m256 _sum0 = _mm256_broadcast_ss(biasptr);
__m256 _sum1 = _mm256_broadcast_ss(biasptr+1);
__m256 _sum2 = _mm256_broadcast_ss(biasptr+2);
__m256 _sum3 = _mm256_broadcast_ss(biasptr+3);
int k=0;
for (; k+3<L; k=k+4)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va+1);
__m256 _va2 = _mm256_broadcast_ss(va+2);
__m256 _va3 = _mm256_broadcast_ss(va+3);
__m256 _vb0 = _mm256_loadu_ps(vb);
__m256 _vb1 = _mm256_loadu_ps(vb+8);
__m256 _vb2 = _mm256_loadu_ps(vb+16);
__m256 _vb3 = _mm256_loadu_ps(vb+24);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
va += 4;
// k1
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va+1);
_va2 = _mm256_broadcast_ss(va+2);
_va3 = _mm256_broadcast_ss(va+3);
_sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01
_sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11
_sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21
_sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31
va += 4;
// k2
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va+1);
_va2 = _mm256_broadcast_ss(va+2);
_va3 = _mm256_broadcast_ss(va+3);
_sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02
_sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12
_sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22
_sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32
va += 4;
// k3
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va+1);
_va2 = _mm256_broadcast_ss(va+2);
_va3 = _mm256_broadcast_ss(va+3);
_sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03
_sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13
_sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23
_sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33
va += 4;
vb += 32;
}
for (; k<L; k++)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va+1);
__m256 _va2 = _mm256_broadcast_ss(va+2);
__m256 _va3 = _mm256_broadcast_ss(va+3);
__m256 _vb0 = _mm256_loadu_ps(vb);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
va += 4;
vb += 4;
}
_mm256_storeu_ps(output0, _sum0);
_mm256_storeu_ps(output1, _sum1);
_mm256_storeu_ps(output2, _sum2);
_mm256_storeu_ps(output3, _sum3);
#else
float sum0[8] = {0};
float sum1[8] = {0};
float sum2[8] = {0};
float sum3[8] = {0};
int k=0;
for (; k+7<L; k=k+8)
{
for (int n=0; n<8; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
va += 4;
sum0[n] += va[0] * vb[n+8];
sum1[n] += va[1] * vb[n+8];
sum2[n] += va[2] * vb[n+8];
sum3[n] += va[3] * vb[n+8];
va += 4;
sum0[n] += va[0] * vb[n+16];
sum1[n] += va[1] * vb[n+16];
sum2[n] += va[2] * vb[n+16];
sum3[n] += va[3] * vb[n+16];
va += 4;
sum0[n] += va[0] * vb[n+24];
sum1[n] += va[1] * vb[n+24];
sum2[n] += va[2] * vb[n+24];
sum3[n] += va[3] * vb[n+24];
va += 4;
sum0[n] += va[0] * vb[n+32];
sum1[n] += va[1] * vb[n+32];
sum2[n] += va[2] * vb[n+32];
sum3[n] += va[3] * vb[n+32];
va += 4;
sum0[n] += va[0] * vb[n+40];
sum1[n] += va[1] * vb[n+40];
sum2[n] += va[2] * vb[n+40];
sum3[n] += va[3] * vb[n+40];
va += 4;
sum0[n] += va[0] * vb[n+48];
sum1[n] += va[1] * vb[n+48];
sum2[n] += va[2] * vb[n+48];
sum3[n] += va[3] * vb[n+48];
va += 4;
sum0[n] += va[0] * vb[n+56];
sum1[n] += va[1] * vb[n+56];
sum2[n] += va[2] * vb[n+56];
sum3[n] += va[3] * vb[n+56];
va -= 28; // rewind so the next n reuses the same 8 k-steps
}
va += 32;
vb += 64;
}
for (; k<L; k++)
{
for (int n=0; n<8; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
}
va += 4;
vb += 8;
}
for (int n=0; n<8; n++)
{
output0[n] = sum0[n] + biasptr[0];
output1[n] = sum1[n] + biasptr[1];
output2[n] = sum2[n] + biasptr[2];
output3[n] = sum3[n] + biasptr[3];
}
#endif // __AVX__
output0 += 8;
output1 += 8;
output2 += 8;
output3 += 8;
}
// remaining columns, one output position at a time
for (; j<N; j++)
{
const float* vb = bottom_tm.channel(j/8 + j%8);
const float* va = kernel_tm.channel(i/8 + (i%8)/4);
#if __AVX__
__m128 _sum0_3 = _mm_loadu_ps(biasptr);
__m128 _sum0 = _mm_set1_ps(0.0);
__m128 _sum1 = _mm_set1_ps(0.0);
__m128 _sum2 = _mm_set1_ps(0.0);
__m128 _sum3 = _mm_set1_ps(0.0);
int k=0;
for (; k+3<L; k=k+4)
{
__m128 _vb0 = _mm_set1_ps(vb[0]);
__m128 _vb1 = _mm_set1_ps(vb[1]);
__m128 _vb2 = _mm_set1_ps(vb[2]);
__m128 _vb3 = _mm_set1_ps(vb[3]);
__m128 _va0 = _mm_loadu_ps(va);
__m128 _va1 = _mm_loadu_ps(va+4);
__m128 _va2 = _mm_loadu_ps(va+8);
__m128 _va3 = _mm_loadu_ps(va+12);
_sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0);// sum0 += (k00-k30) * a00
_sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1);// sum1 += (k01-k31) * a10
_sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2);// sum2 += (k02-k32) * a20
_sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3);// sum3 += (k03-k33) * a30
va += 16;
vb += 4;
}
_sum0 = _mm_add_ps(_sum0, _sum1);
_sum2 = _mm_add_ps(_sum2, _sum3);
_sum0_3 = _mm_add_ps(_sum0_3, _sum0);
_sum0_3 = _mm_add_ps(_sum0_3, _sum2);
for (; k<L; k++)
{
__m128 _vb0 = _mm_set1_ps(vb[0]);
__m128 _va = _mm_loadu_ps(va);
_sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3);// sum0 += (k00-k30) * a00
va += 4;
vb += 1;
}
// scatter the 4 lanes to the 4 output channels
output0[0] = _sum0_3[0];
output1[0] = _sum0_3[1];
output2[0] = _sum0_3[2];
output3[0] = _sum0_3[3];
#else
float sum0 = biasptr[0];
float sum1 = biasptr[1];
float sum2 = biasptr[2];
float sum3 = biasptr[3];
for (int k=0; k<L; k++)
{
sum0 += va[0] * vb[0];
sum1 += va[1] * vb[0];
sum2 += va[2] * vb[0];
sum3 += va[3] * vb[0];
va += 4;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
#endif // __AVX__
output0++;
output1++;
output2++;
output3++;
}
}
// ---- 1 output channel x 8 columns micro-kernel (leftover channels) ----
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_outch_start; i<outch; i++)
{
float* output = top_blob.channel(i);
const float bias0 = bias ? bias[i] : 0.f;
int j=0;
for (; j+7<N; j=j+8)
{
const float* vb = bottom_tm.channel(j/8);
const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4);
#if __AVX__
__m256 _sum0 = _mm256_broadcast_ss(&bias0);
int k=0;
for (; k+3<L; k=k+4)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va+1);
__m256 _va2 = _mm256_broadcast_ss(va+2);
__m256 _va3 = _mm256_broadcast_ss(va+3);
__m256 _vb0 = _mm256_loadu_ps(vb);
__m256 _vb1 = _mm256_loadu_ps(vb+8);
__m256 _vb2 = _mm256_loadu_ps(vb+16);
__m256 _vb3 = _mm256_loadu_ps(vb+24);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); // sum0 += (a10-a17) * k01
_sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); // sum0 += (a20-a27) * k02
_sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); // sum0 += (a30-a37) * k03
va += 4;
vb += 32;
}
for (; k<L; k++)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _vb0 = _mm256_loadu_ps(vb);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
va += 1;
vb += 4;
}
_mm256_storeu_ps(output, _sum0);
#else
float sum[8] = {0};
int k=0;
for (; k+7<L; k=k+8)
{
for (int n=0; n<8; n++)
{
sum[n] += va[0] * vb[n];
sum[n] += va[1] * vb[n+8];
sum[n] += va[2] * vb[n+16];
sum[n] += va[3] * vb[n+24];
sum[n] += va[4] * vb[n+32];
sum[n] += va[5] * vb[n+40];
sum[n] += va[6] * vb[n+48];
sum[n] += va[7] * vb[n+56];
}
va += 8;
vb += 64;
}
for (; k<L; k++)
{
for (int n=0; n<8; n++)
{
sum[n] += va[0] * vb[n];
}
va += 1;
vb += 8;
}
for (int n=0; n<8; n++)
{
output[n] = sum[n] + bias0;
}
#endif // __AVX__
output += 8;
}
// remaining columns, one output position at a time
for (; j<N; j++)
{
const float* vb = bottom_tm.channel(j/8 + j%8);
const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4);
int k=0;
#if __AVX__
__m128 _sum0 = _mm_set1_ps(0.f);
for (; k+3<L; k+=4)
{
__m128 _p0 = _mm_loadu_ps(vb);
vb += 4;
__m128 _k0 = _mm_loadu_ps(va);
va += 4;
_sum0 = _mm_fmadd_ps(_p0, _k0, _sum0);
}
// horizontal sum of the 4 lanes plus bias
float sum0 = bias0 + _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#else
float sum0 = bias0;
#endif // __AVX__
for (; k<L; k++)
{
sum0 += va[0] * vb[0];
va += 1;
vb += 1;
}
output[0] = sum0;
output++;
}
}
}
}
#else
// Repack raw kernel weights for the 4-wide (non-AVX) SGEMM micro-kernel.
// Output channels are grouped 4-at-a-time with their weights interleaved,
// then leftovers get one kernel_tm channel each, mirroring the
// p/4 + p%4 channel lookup used by conv_im2col_sgemm_sse.
static void conv_im2col_sgemm_transform_kernel_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
    const float* kernel = _kernel;
    // kernel memory packed 4 x 4
    kernel_tm.create(4*kernel_size, inch, outch/4 + outch%4);
    const int nn_outch = outch >> 2;
    const int remain_outch_start = nn_outch << 2;
    // groups of 4 output channels: interleave the weights of k0..k3
    for (int pp=0; pp<nn_outch; pp++)
    {
        const int p = pp * 4;
        const float* k[4];
        for (int t=0; t<4; t++)
            k[t] = kernel + (p+t)*inch*kernel_size;
        float* ktmp = kernel_tm.channel(p/4);
        for (int q=0; q<inch*kernel_size; q++)
        {
            for (int t=0; t<4; t++)
                *ktmp++ = *k[t]++;
        }
    }
    // leftover output channels are copied verbatim, one at a time
    for (int p=remain_outch_start; p<outch; p++)
    {
        const float* k0 = kernel + p*inch*kernel_size;
        float* ktmp = kernel_tm.channel(p/4 + p%4);
        for (int q=0; q<inch*kernel_size; q++)
            *ktmp++ = *k0++;
    }
}
static void conv_im2col_sgemm_sse(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, const Mat& _bias, \
const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
// im2col
Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, elemsize, opt.workspace_allocator);
{
const int stride = kernel_h*kernel_w*outw*outh;
float* ret = (float*)bottom_im2col;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<inch; p++)
{
const float* input = bottom_blob.channel(p);
int retID = stride * p;
for (int u=0; u<kernel_h; u++)
{
for (int v=0; v<kernel_w; v++)
{
for (int i=0; i<outh; i++)
{
for (int j=0; j<outw; j++)
{
int row = u + i * stride_h;
int col = v + j * stride_w;
int index = row * w + col;
ret[retID] = input[index];
retID++;
}
}
}
}
}
}
int kernel_size = kernel_w * kernel_h;
int out_size = outw * outh;
// bottom_im2col memory packed 4 x 4
Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, elemsize, opt.workspace_allocator);
{
int nn_size = out_size >> 2;
int remain_size_start = nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 4;
const float* img0 = bottom_im2col.channel(0);
img0 += i;
float* tmpptr = bottom_tm.channel(i/4);
for (int q=0; q<inch*kernel_size; q++)
{
#if __SSE__
_mm_storeu_ps(tmpptr, _mm_loadu_ps(img0));
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
#endif // __SSE__
tmpptr += 4;
img0 += out_size;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<out_size; i++)
{
const float* img0 = bottom_im2col.channel(0);
img0 += i;
float* tmpptr = bottom_tm.channel(i/4 + i%4);
for (int q=0; q<inch*kernel_size; q++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += out_size;
}
}
}
// sgemm(int M, int N, int L, float* A, float* B, float* C)
{
//int M = outch; // outch
int N = outw * outh; // outsize or out stride
int L = kernel_w * kernel_h * inch; // ksize * inch
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int i = pp * 4;
float* output0 = top_blob.channel(i);
float* output1 = top_blob.channel(i+1);
float* output2 = top_blob.channel(i+2);
float* output3 = top_blob.channel(i+3);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + i : zeros;
int j=0;
for (; j+3<N; j=j+4)
{
const float* vb = bottom_tm.channel(j/4);
const float* va = kernel_tm.channel(i/4);
#if __SSE__
__m128 _sum0 = _mm_set1_ps(biasptr[0]);
__m128 _sum1 = _mm_set1_ps(biasptr[1]);
__m128 _sum2 = _mm_set1_ps(biasptr[2]);
__m128 _sum3 = _mm_set1_ps(biasptr[3]);
int k=0;
for (; k+3<L; k=k+4)
{
// k0
__m128 _vb = _mm_loadu_ps(vb);
__m128 _va0 = _mm_set1_ps(va[0]);
__m128 _va1 = _mm_set1_ps(va[1]);
__m128 _va2 = _mm_set1_ps(va[2]);
__m128 _va3 = _mm_set1_ps(va[3]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a00-a03) * k00
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a00-a03) * k10
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a00-a03) * k20
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a00-a03) * k30
// k1
_vb = _mm_loadu_ps(vb+4);
_va0 = _mm_set1_ps(va[4]);
_va1 = _mm_set1_ps(va[5]);
_va2 = _mm_set1_ps(va[6]);
_va3 = _mm_set1_ps(va[7]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a10-a13) * k01
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a10-a13) * k11
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a10-a13) * k21
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a10-a13) * k31
// k2
_vb = _mm_loadu_ps(vb+8);
_va0 = _mm_set1_ps(va[8]);
_va1 = _mm_set1_ps(va[9]);
_va2 = _mm_set1_ps(va[10]);
_va3 = _mm_set1_ps(va[11]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a20-a23) * k02
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a20-a23) * k12
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a20-a23) * k22
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a20-a23) * k32
// k3
_vb = _mm_loadu_ps(vb+12);
_va0 = _mm_set1_ps(va[12]);
_va1 = _mm_set1_ps(va[13]);
_va2 = _mm_set1_ps(va[14]);
_va3 = _mm_set1_ps(va[15]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a30-a33) * k03
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a30-a33) * k13
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a30-a33) * k23
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a30-a33) * k33
va += 16;
vb += 16;
}
for (; k<L; k++)
{
// k0
__m128 _vb = _mm_loadu_ps(vb);
__m128 _va0 = _mm_set1_ps(va[0]);
__m128 _va1 = _mm_set1_ps(va[1]);
__m128 _va2 = _mm_set1_ps(va[2]);
__m128 _va3 = _mm_set1_ps(va[3]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a00-a03) * k00
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a00-a03) * k10
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a00-a03) * k20
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a00-a03) * k30
va += 4;
vb += 4;
}
_mm_storeu_ps(output0, _sum0);
_mm_storeu_ps(output1, _sum1);
_mm_storeu_ps(output2, _sum2);
_mm_storeu_ps(output3, _sum3);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
int k=0;
for (; k+7<L; k=k+8)
{
for (int n=0; n<4; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
va += 4;
sum0[n] += va[0] * vb[n+4];
sum1[n] += va[1] * vb[n+4];
sum2[n] += va[2] * vb[n+4];
sum3[n] += va[3] * vb[n+4];
va += 4;
sum0[n] += va[0] * vb[n+8];
sum1[n] += va[1] * vb[n+8];
sum2[n] += va[2] * vb[n+8];
sum3[n] += va[3] * vb[n+8];
va += 4;
sum0[n] += va[0] * vb[n+12];
sum1[n] += va[1] * vb[n+12];
sum2[n] += va[2] * vb[n+12];
sum3[n] += va[3] * vb[n+12];
va += 4;
sum0[n] += va[0] * vb[n+16];
sum1[n] += va[1] * vb[n+16];
sum2[n] += va[2] * vb[n+16];
sum3[n] += va[3] * vb[n+16];
va += 4;
sum0[n] += va[0] * vb[n+20];
sum1[n] += va[1] * vb[n+20];
sum2[n] += va[2] * vb[n+20];
sum3[n] += va[3] * vb[n+20];
va += 4;
sum0[n] += va[0] * vb[n+24];
sum1[n] += va[1] * vb[n+24];
sum2[n] += va[2] * vb[n+24];
sum3[n] += va[3] * vb[n+24];
va += 4;
sum0[n] += va[0] * vb[n+28];
sum1[n] += va[1] * vb[n+28];
sum2[n] += va[2] * vb[n+28];
sum3[n] += va[3] * vb[n+28];
va -= 28;
}
va += 32;
vb += 32;
}
for (; k<L; k++)
{
for (int n=0; n<4; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
}
va += 4;
vb += 4;
}
for (int n=0; n<4; n++)
{
output0[n] = sum0[n] + biasptr[0];
output1[n] = sum1[n] + biasptr[1];
output2[n] = sum2[n] + biasptr[2];
output3[n] = sum3[n] + biasptr[3];
}
#endif // __SSE__
output0 += 4;
output1 += 4;
output2 += 4;
output3 += 4;
}
for (; j<N; j++)
{
const float* vb = bottom_tm.channel(j/4 + j%4);
const float* va = kernel_tm.channel(i/4);
#if __SSE__
__m128 _sum0_3 = _mm_loadu_ps(biasptr);
__m128 _sum0 = _mm_set1_ps(0.0);
__m128 _sum1 = _mm_set1_ps(0.0);
__m128 _sum2 = _mm_set1_ps(0.0);
__m128 _sum3 = _mm_set1_ps(0.0);
int k=0;
for (; k+3<L; k=k+4)
{
__m128 _vb0 = _mm_set1_ps(vb[0]);
__m128 _vb1 = _mm_set1_ps(vb[1]);
__m128 _vb2 = _mm_set1_ps(vb[2]);
__m128 _vb3 = _mm_set1_ps(vb[3]);
__m128 _va0 = _mm_loadu_ps(va);
__m128 _va1 = _mm_loadu_ps(va+4);
__m128 _va2 = _mm_loadu_ps(va+8);
__m128 _va3 = _mm_loadu_ps(va+12);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));// sum0 += (k00-k30) * a00
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va1, _vb1));// sum1 += (k01-k31) * a10
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va2, _vb2));// sum2 += (k02-k32) * a20
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va3, _vb3));// sum3 += (k03-k33) * a30
va += 16;
vb += 4;
}
_sum0 = _mm_add_ps(_sum0, _sum1);
_sum2 = _mm_add_ps(_sum2, _sum3);
_sum0_3 = _mm_add_ps(_sum0_3, _sum0);
_sum0_3 = _mm_add_ps(_sum0_3, _sum2);
for (; k<L; k++)
{
__m128 _vb0 = _mm_set1_ps(vb[0]);
__m128 _va = _mm_loadu_ps(va);
_sum0_3 = _mm_add_ps(_sum0_3, _mm_mul_ps(_va, _vb0));// sum0 += (k00-k30) * a00
va += 4;
vb += 1;
}
output0[0] = _sum0_3[0];
output1[0] = _sum0_3[1];
output2[0] = _sum0_3[2];
output3[0] = _sum0_3[3];
#else
float sum0 = biasptr[0];
float sum1 = biasptr[1];
float sum2 = biasptr[2];
float sum3 = biasptr[3];
for (int k=0; k<L; k++)
{
sum0 += va[0] * vb[0];
sum1 += va[1] * vb[0];
sum2 += va[2] * vb[0];
sum3 += va[3] * vb[0];
va += 4;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
#endif // __SSE__
output0++;
output1++;
output2++;
output3++;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_outch_start; i<outch; i++)
{
float* output = top_blob.channel(i);
const float bias0 = bias ? bias[i] : 0.f;
int j=0;
for (; j+3<N; j=j+4)
{
const float* vb = bottom_tm.channel(j/4);
const float* va = kernel_tm.channel(i/4 + i%4);
#if __SSE__
__m128 _sum0 = _mm_set1_ps(bias0);
int k=0;
for (; k+3<L; k=k+4)
{
// k0
__m128 _va0 = _mm_set1_ps(va[0]);
__m128 _va1 = _mm_set1_ps(va[1]);
__m128 _va2 = _mm_set1_ps(va[2]);
__m128 _va3 = _mm_set1_ps(va[3]);
__m128 _vb0 = _mm_loadu_ps(vb);
__m128 _vb1 = _mm_loadu_ps(vb+4);
__m128 _vb2 = _mm_loadu_ps(vb+8);
__m128 _vb3 = _mm_loadu_ps(vb+12);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0));// sum0 = (a00-a03) * k00
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb1, _va1));// sum0 += (a10-a13) * k01
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb2, _va2));// sum0 += (a20-a23) * k02
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb3, _va3));// sum0 += (a30-a33) * k03
va += 4;
vb += 16;
}
for (; k<L; k++)
{
// k0
__m128 _va0 = _mm_set1_ps(va[0]);
__m128 _vb0 = _mm_loadu_ps(vb);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0)); // sum0 = (a00-a03) * k00
va += 1;
vb += 4;
}
_mm_storeu_ps(output, _sum0);
#else
float sum[4] = {0};
int k=0;
for (; k+3<L; k=k+4)
{
for (int n=0; n<4; n++)
{
sum[n] += va[0] * vb[n];
sum[n] += va[1] * vb[n+4];
sum[n] += va[2] * vb[n+8];
sum[n] += va[3] * vb[n+12];
sum[n] += va[4] * vb[n+16];
sum[n] += va[5] * vb[n+20];
sum[n] += va[6] * vb[n+24];
sum[n] += va[7] * vb[n+28];
}
va += 8;
vb += 32;
}
for (; k<L; k++)
{
for (int n=0; n<4; n++)
{
sum[n] += va[0] * vb[n];
}
va += 1;
vb += 4;
}
for (int n=0; n<4; n++)
{
output[n] = sum[n] + bias0;
}
#endif // __SSE__
output += 4;
}
for (; j<N; j++)
{
const float* vb = bottom_tm.channel(j/4 + j%4);
const float* va = kernel_tm.channel(i/4 + i%4);
int k=0;
#if __SSE__
__m128 _sum0 = _mm_set1_ps(0.f);
for (; k+3<L; k+=4)
{
__m128 _p0 = _mm_loadu_ps(vb);
__m128 _k0 = _mm_loadu_ps(va);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0));
va += 4;
vb += 4;
}
float sum0 = bias0 + _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#else
float sum0 = bias0;
#endif // __SSE__
for (; k<L; k++)
{
sum0 += va[0] * vb[0];
va += 1;
vb += 1;
}
output[0] = sum0;
output++;
}
}
}
}
#endif
|
rose_v1_plusAssign.c | #include <omp.h>
int main(int argc,char *argv[])
{
int i;
int j;
double a[20][20];
// memset(a,0,(sizeof(a)));
for (i = 0; i <= 18; i += 1) {
#pragma omp parallel for private (j)
for (j = 0; j <= 19; j += 1) {
a[i][j] += a[i + 1][j];
}
}
return 0;
}
|
laplace2d.c | /*
* Copyright 2012 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <stdio.h>
#include <string.h>

#include <openacc.h>

#include "timer.h"
#define NN 4096
#define NM 4096
double A[NN][NM];
double Anew[NN][NM];
/*
 * Jacobi relaxation on an NN x NM grid until the max per-cell update drops
 * below tol or iter_max sweeps have run.  Boundary column 0 is held at 1.0.
 */
int main(int argc, char** argv)
{
    const int n = NN;
    const int m = NM;
    const int iter_max = 1000;

    const double tol = 1.0e-6;
    double error     = 1.0;

    memset(A, 0, n * m * sizeof(double));
    memset(Anew, 0, n * m * sizeof(double));

    /* Dirichlet boundary condition: first column fixed at 1.0. */
    for (int j = 0; j < n; j++)
    {
        A[j][0]    = 1.0;
        Anew[j][0] = 1.0;
    }

    printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m);

    StartTimer();
    int iter = 0;

    while ( error > tol && iter < iter_max )
    {
        error = 0.0;

        /* BUGFIX: 'error' is a max-accumulator updated by every thread; the
           original pragma left it shared with no reduction, a data race that
           could under-report the residual and terminate early.  OpenMP 3.1+
           reduction(max:...) makes the accumulation race-free. */
#pragma omp parallel for shared(m, n, Anew, A) reduction(max:error)
#pragma acc kernels
        for( int j = 1; j < n-1; j++)
        {
            for( int i = 1; i < m-1; i++ )
            {
                /* 4-point stencil average of the neighbors. */
                Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1]
                                    + A[j-1][i] + A[j+1][i]);
                error = fmax( error, fabs(Anew[j][i] - A[j][i]));
            }
        }

        /* Copy the updated interior back for the next sweep. */
#pragma omp parallel for shared(m, n, Anew, A)
#pragma acc kernels
        for( int j = 1; j < n-1; j++)
        {
            for( int i = 1; i < m-1; i++ )
            {
                A[j][i] = Anew[j][i];
            }
        }

        if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);

        iter++;
    }

    double runtime = GetTimer();

    printf(" total: %f s\n", runtime / 1000);
    return 0;
}
|
matmul.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 3000 /* number of rows in matrix A */
/*
 * Dense N x N matrix multiply benchmark: C = A * B with OpenMP static
 * scheduling on the outer loop, then prints C[N-1][N-1].
 */
int main (int argc, char *argv[])
{
  int i, j, k, chunk;
  /* BUGFIX: each N x N double matrix is ~72 MB (N = 3000); three of them as
     automatic (stack) variables (~216 MB) overflow any default stack and
     crash at startup.  Static storage keeps the same data layout without
     the stack limit. */
  static double a[N][N],      /* matrix A to be multiplied */
                b[N][N],      /* matrix B to be multiplied */
                c[N][N];      /* result matrix C */
  chunk=10;                   /* iterations handed to each thread per chunk */

  /* Initialize: a[i][j]=i+j, b[i][j]=i*j, c zeroed. */
#pragma omp parallel for private(i,j,k) schedule(static,chunk)
  for (i=0; i<N; i++)
    for (j=0; j<N; j++){
      a[i][j]= i+j;
      b[i][j]= i*j;
      c[i][j]= 0;
    }

  /*** Do matrix multiply sharing iterations on outer loop ***/
  /* i-k-j loop order keeps the innermost accesses contiguous in memory. */
#pragma omp parallel for private(i,j,k) schedule(static,chunk)
  for (i=0; i<N; i++)
    for(k=0; k<N; k++)
      for (j=0; j<N; j++)
        c[i][j] += a[i][k] * b[k][j];

  /****Print the last element of the loop****/
  printf("%lf\n",c[N-1][N-1]);
  return 0;
}
|
mandelbrot.c |
/*=======================
M A N D E L B R O T
=======================*/
// Implementation Based on Rosetta Code Example
// 1) Draws Mandelbrot set for Fc(z)=z*z +c using
// Mandelbrot algorithm (boolean escape time).
// 2) Technique of creating ppm file is based on
// the code of Claudio Rocchini. http://en.
// wikipedia.org/wiki/Image:Color_complex_plot
// .jpg. Create 24 bit color graphic file,
// portable pixmap file = PPM, see http://en.
// wikipedia.org/wiki/Portable_pixmap to see
// the file use external application (graphic
// viewer).
// Inclusions
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
// Definitions
#define MAX_COLOR_COMPONENT_VALUE 255
#define I_MAX 200
#define CXMIN -1.5
#define CXMAX 0.5
#define CYMIN -1.0
#define CYMAX 1.0
#define ESCAPE_RADIUS_2 4.0
// Image Structure Definition
typedef struct {
unsigned char color[3];
} image;
// Function Prototypes
void instructions( int argc, char** argv );
// Main
/*
 * Generate a Mandelbrot escape-time image and write it as a binary PPM (P6).
 * argv: [1] output filename, [2] image width/height in pixels, [3] debug flag.
 */
int main ( int argc, char **argv ) {
    instructions( argc, argv );                    // Display Usage Instructions If Wrong Arguments
    double time_start, time_end;
    time_start = omp_get_wtime();
    char *filename = argv[1];                      // Parse Input Arguments
    unsigned int iXMax = atoi( argv[2] );          // Generated Image Width
    unsigned int iYMax = iXMax;                    // Generated Image Height (square image)
    unsigned int display = atoi( argv[3] );        // Argument to Display Debug Text
    unsigned int i = 0;                            // Iteration Number
    unsigned int iX = 0;                           // Screen (Integer) X Coordinate
    unsigned int iY = 0;                           // Screen (Integer) Y Coordinate
    unsigned int thisPixelNum = 0;                 // Iterator for Tracking Pixel Number
    double cX = 0.0;                               // World (Double) X Coordinate
    double cY = 0.0;                               // World (Double) Y Coordinate
    double zX = 0.0;                               // Z = Zx + Zy * i; Z0 = 0
    double zY = 0.0;                               // (see just above)
    double zX2 = 0.0;                              // Square of Zx
    double zY2 = 0.0;                              // Square of Zy
    char *comment = "# ";                          // Dynamic File Header Comment
    // Intro Text and Setup
    if( display ) {
        printf( "\n = = = Mandelbrot Set Generator = = = \n\n" );
    }
    unsigned int size = iXMax * iYMax;                    // Determination of Size
    double pixelWidth = ( CXMAX - CXMIN ) / iXMax;        // Determination of Pixel Width/
    double pixelHeight = ( CYMAX - CYMIN ) / iYMax;       // Height from Window/Size
    image *fractal = malloc( size * sizeof( *fractal ) ); // Allocate Storage for Image
    if( fractal == NULL ) {                               // BUGFIX: allocation was unchecked
        fprintf( stderr, "Failed to allocate image storage.\n" );
        return EXIT_FAILURE;
    }
    // Compute Fractal Image
    if( display ) {
        printf( "Generating Mandelbrot Set...\n" );
    }
    #pragma omp parallel for private(iY, iX, cY, cX, zY, zX, zY2, zX2, i, thisPixelNum) shared (iYMax, iXMax, pixelHeight, pixelWidth, fractal) default (none) schedule(guided)
    for( iY = 0; iY < iYMax; iY++ ) {              // Iterate Through Image Rows
        cY = CYMIN + iY * pixelHeight;
        if( fabs( cY ) < ( pixelHeight / 2 ) ) {
            cY = 0.0;                              // Main Antenna
        }
        for( iX = 0; iX < iXMax; iX++ ) {          // Iterate Through Image Columns
            cX = CXMIN + iX * pixelWidth;
            zX = 0.0;                              // Initial Value of Orbit - Critical Point Z = 0
            zY = 0.0;
            zX2 = zX * zX;
            zY2 = zY * zY;
            for( i = 0; ( i < I_MAX ) && ( ( zX2 + zY2 ) < ESCAPE_RADIUS_2 ); i++ ) {
                zY = 2 * zX * zY + cY;
                zX = zX2 - zY2 + cX;
                zX2 = zX * zX;
                zY2 = zY * zY;
            };
            // Save Pixel Color
            // BUGFIX: row stride is the image WIDTH (iXMax), not the height.
            // The original used iYMax, which only worked because the image is
            // square; this is correct for any aspect ratio.
            thisPixelNum = iY * iXMax + iX;        // Where is this pixel in the image?
            if( i == I_MAX ) {                     // Color for Interior of Mandelbrot Set
                fractal[thisPixelNum].color[0] = 37;   // Red
                fractal[thisPixelNum].color[1] = 37;   // Green
                fractal[thisPixelNum].color[2] = 37;   // Blue
            } else {                               // Color for Exterior of Mandelbrot Set
                fractal[thisPixelNum].color[0] = 0;    // Red
                fractal[thisPixelNum].color[1] = 0;    // Green
                fractal[thisPixelNum].color[2] = 255;  // Blue
            } // End If
        } // End iX For
    } // End iY For
    // Image File Write Phase
    if( display ) {
        printf( "Writing File Out...\n" );
    }
    // Create New File - give it a name and open it in binary mode.
    FILE *filePtr = fopen( filename, "wb" );       // b - Binary Mode
    if( filePtr == NULL ) {                        // BUGFIX: fopen was unchecked
        fprintf( stderr, "Cannot open output file.\n" );
        free( fractal );
        return EXIT_FAILURE;
    }
    // Write ASCII Header to the File (%u: iXMax/iYMax are unsigned int).
    fprintf( filePtr, "P6\n %s\n %u\n %u\n %d\n", comment, iXMax, iYMax, MAX_COLOR_COMPONENT_VALUE );
    // Image File Write Out - must be done serially.
    for( iY = 0; iY < iYMax; iY++ ) {
        for( iX = 0; iX < iXMax; iX++ ) {
            thisPixelNum = iY * iXMax + iX;                        // Set Dereference Pixel Location (width stride, see above)
            fwrite( fractal[thisPixelNum].color, 1, 3, filePtr );  // Write Pixel Color to File
        }
    }
    // Final Tasks
    fclose( filePtr );
    free( fractal );
    if( display ) {
        printf( "Operation Complete!\n\n" );
    }
    time_end = omp_get_wtime();
    printf("Execution Time (s): %f\n", time_end-time_start );
    return EXIT_SUCCESS;
}
// Function Implementations
// Instructions - display usage instructions if argument count incorrent.
// Instructions - print usage text and abort when the argument count is wrong;
// a correct invocation (exactly 4 arguments) returns without side effects.
void instructions( int argc, char** argv ) {
    if( argc == 4 ) {
        return; // Correct usage: program, output file, size, display flag.
    }
    printf( "\nUsage: %s <output> <x/y> <display>\n", argv[0] );
    printf( " Output - a .ppm image to output with the fractal.\n" );
    printf( " X/Y - width and height of image in pixels.\n" );
    printf( " Display - 1 displays debug text, 0 just displays time values for raw data tables.\n\n" );
    exit( EXIT_FAILURE );
}
// End mandelbrot.c - EWG SDG
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
register ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) != 0)
channels++;
}
return(channels == 0 ? (size_t) 1 : channels);
}
/*
  CompareImages(): compute the distortion between 'image' and
  'reconstruct_image' under 'metric', then render a difference image in which
  changed pixels are painted with the highlight color, unchanged pixels with
  the lowlight color, and masked pixels with the masklight color.  Returns the
  difference image, or NULL on failure; the scalar result is written through
  'distortion'.  Caller owns (and must destroy) the returned image.
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  double
    fuzz;

  Image
    *clone_image,
    *difference_image,
    *highlight_image;

  MagickBooleanType
    status;

  PixelInfo
    highlight,
    lowlight,
    masklight;

  RectangleInfo
    geometry;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Compute the requested scalar metric first; bail out before any image
    allocation if it fails.
  */
  status=GetImageDistortion(image,reconstruct_image,metric,distortion,
    exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  /*
    Size the output canvas to cover whichever image is larger.
  */
  columns=MagickMax(image->columns,reconstruct_image->columns);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  SetGeometry(image,&geometry);
  geometry.width=columns;
  geometry.height=rows;
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
  difference_image=ExtentImage(clone_image,&geometry,exception);
  clone_image=DestroyImage(clone_image);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
  highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  status=SetImageStorageClass(highlight_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
  /*
    Default annotation colors; each may be overridden by an image artifact
    (e.g. "-define compare:highlight-color=...").
  */
  (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
  artifact=GetImageArtifact(image,"compare:highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
  (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
  artifact=GetImageArtifact(image,"compare:lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
  (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
  artifact=GetImageArtifact(image,"compare:masklight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
  /*
    Generate difference image.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,highlight_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      *magick_restrict r;

    register ssize_t
      x;

    /* A failure on any row makes the remaining rows no-ops. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickStatusType
        difference;

      register ssize_t
        i;

      /* Masked-out pixels are painted with the masklight color and skipped. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          SetPixelViaPixelInfo(highlight_image,&masklight,r);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          r+=GetPixelChannels(highlight_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Non-alpha channels are compared alpha-premultiplied. */
        if (channel == AlphaPixelChannel)
          distance=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        if ((distance*distance) > fuzz)
          {
            difference=MagickTrue;
            break;
          }
      }
      if (difference == MagickFalse)
        SetPixelViaPixelInfo(highlight_image,&lowlight,r);
      else
        SetPixelViaPixelInfo(highlight_image,&highlight,r);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
      r+=GetPixelChannels(highlight_image);
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Overlay the highlight annotation onto the extended difference canvas. */
  (void) CompositeImage(difference_image,highlight_image,image->compose,
    MagickTrue,0,0,exception);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetAbsoluteDistortion(): count, per channel, the pixels of 'image' whose
  squared color distance from 'reconstruct_image' exceeds the fuzz threshold;
  results are accumulated into distortion[0..MaxPixelChannels], with the
  overall per-pixel count in distortion[CompositePixelChannel].  Returns
  MagickFalse if any row of pixels could not be read.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  /* Fuzz threshold is scaled by the smaller channel count of the two images. */
  fuzz=(double) MagickMin(GetPixelChannels(image),
    GetPixelChannels(reconstruct_image))*
    GetFuzzyColorDistance(image,reconstruct_image);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Per-row counters, merged under the critical section below. */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        distance,
        Sa;

      MagickBooleanType
        difference;

      register ssize_t
        i;

      difference=MagickFalse;
      distance=0.0;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Non-alpha channels are compared alpha-premultiplied. */
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        /* NOTE(review): 'distance' accumulates across ALL channels of this
           pixel, so once fuzz is exceeded every later channel is also
           counted as differing -- verify this matches upstream intent. */
        distance+=pixel*pixel;
        if (distance > fuzz)
          {
            channel_distortion[i]++;
            difference=MagickTrue;
          }
      }
      if (difference != MagickFalse)
        channel_distortion[CompositePixelChannel]++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  GetFuzzDistortion(): accumulate the per-channel mean of the squared,
  QuantumScale-normalized pixel differences between 'image' and
  'reconstruct_image' into distortion[]; the composite entry is additionally
  averaged over the channel count and square-rooted (an RMS-style value).
  Masked pixels are excluded from both the sum and the area.  Returns
  MagickFalse if any row of pixels could not be read.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* 'area' (count of unmasked pixels) is combined with a sum reduction. */
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Per-row accumulators, merged under the critical section below. */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* Masked pixels contribute neither distortion nor area. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Differences are normalized to [0,1]; non-alpha channels compared
           alpha-premultiplied. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Convert sums to means; PerceptibleReciprocal() guards area == 0. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}
/*
  GetMeanAbsoluteDistortion() computes the mean absolute error (MAE) between
  image and reconstruct_image.  For each updatable channel it accumulates
  QuantumScale*|p-q| (alpha-weighted for non-alpha channels), then divides by
  the number of unmasked pixels.  distortion[] must hold MaxPixelChannels+1
  doubles; the composite slot is additionally averaged over the channel count.
  Returns MagickFalse if any row of virtual pixels cannot be read.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
area;
MagickBooleanType
status;
register ssize_t
j;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
/* iterate over the larger of the two geometries; virtual views pad edges */
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* per-thread accumulator; merged under the critical section below */
(void) memset(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* skip pixels excluded by either image's read mask */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
/* alpha compared directly; color channels weighted by each alpha */
if (channel == AlphaPixelChannel)
distance=QuantumScale*fabs((double) p[i]-
GetPixelChannel(reconstruct_image,channel,q));
else
distance=QuantumScale*fabs(Sa*p[i]-Da*
GetPixelChannel(reconstruct_image,channel,q));
channel_distortion[i]+=distance;
channel_distortion[CompositePixelChannel]+=distance;
}
area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* PerceptibleReciprocal guards against area == 0 (fully masked input) */
area=PerceptibleReciprocal(area);
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]*=area;
distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
return(status);
}
/*
  GetMeanErrorPerPixel() accumulates per-channel absolute error into
  distortion[] and records the mean/normalized-mean/normalized-maximum error
  statistics on image->error as a side effect.  Unlike its siblings, the area
  count advances once per compared channel, not per pixel.

  Fix: the original divided by `area` directly; when every sample is masked
  (area == 0) that is a division by zero and NaN/Inf is written into
  image->error.  Use PerceptibleReciprocal() like the other metrics here.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  double
    area,
    maximum_error,
    mean_error;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  /*
    Iterate over the larger of the two geometries; virtual views pad edges.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /*
        Skip pixels excluded by either image's read mask.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          Alpha is compared directly; color channels are alpha-weighted.
        */
        if (channel == AlphaPixelChannel)
          distance=fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          distance=fabs(Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q));
        distortion[i]+=distance;
        distortion[CompositePixelChannel]+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Guard against division by zero when no samples were compared (area == 0):
    PerceptibleReciprocal() returns a safely bounded reciprocal.
  */
  area=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=area*distortion[CompositePixelChannel];
  image->error.normalized_mean_error=QuantumScale*QuantumScale*area*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
/*
  GetMeanSquaredDistortion() computes the mean squared error (MSE) between
  image and reconstruct_image.  Per channel it accumulates the square of the
  QuantumScale-normalized (alpha-weighted) difference, then divides by the
  number of unmasked pixels.  The composite slot is further averaged over the
  channel count.  PSNR and RMSE are derived from this result.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
area;
MagickBooleanType
status;
register ssize_t
j;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
/* iterate over the larger of the two geometries; virtual views pad edges */
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* per-thread accumulator; merged under the critical section below */
(void) memset(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* skip pixels excluded by either image's read mask */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
/* signed difference is squared below, so no fabs() needed here */
if (channel == AlphaPixelChannel)
distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
channel,q));
else
distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
channel,q));
channel_distortion[i]+=distance*distance;
channel_distortion[CompositePixelChannel]+=distance*distance;
}
area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* PerceptibleReciprocal guards against area == 0 (fully masked input) */
area=PerceptibleReciprocal(area);
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]*=area;
distortion[CompositePixelChannel]/=GetImageChannels(image);
return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion() computes the normalized
  cross-correlation (NCC) between image and reconstruct_image: for each
  channel, the mean-subtracted product of the two pixel streams summed over
  all unmasked pixels, divided by the product of the channel standard
  deviations.  Two serial passes are made: the first counts comparable
  (unmasked) pixels, the second accumulates the correlation.  Means and
  standard deviations come from GetImageStatistics().
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
const Image *image,const Image *reconstruct_image,double *distortion,
ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"
CacheView
*image_view,
*reconstruct_view;
ChannelStatistics
*image_statistics,
*reconstruct_statistics;
double
area;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
size_t
columns,
rows;
ssize_t
y;
/*
Normalize to account for variation due to lighting and exposure condition.
*/
image_statistics=GetImageStatistics(image,exception);
reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
if ((image_statistics == (ChannelStatistics *) NULL) ||
(reconstruct_statistics == (ChannelStatistics *) NULL))
{
/* release whichever of the two allocations succeeded */
if (image_statistics != (ChannelStatistics *) NULL)
image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
image_statistics);
if (reconstruct_statistics != (ChannelStatistics *) NULL)
reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
reconstruct_statistics);
return(MagickFalse);
}
status=MagickTrue;
progress=0;
for (i=0; i <= MaxPixelChannels; i++)
distortion[i]=0.0;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
/* pass 1: count the pixels that survive both read masks */
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
}
/* safe reciprocal of the pixel count (guards against area == 0) */
area=PerceptibleReciprocal(area);
/* pass 2: accumulate the mean-subtracted cross products */
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
/* alpha compared raw; color channels weighted by respective alphas */
if (channel == AlphaPixelChannel)
{
distortion[i]+=area*QuantumScale*(p[i]-
image_statistics[channel].mean)*(GetPixelChannel(
reconstruct_image,channel,q)-
reconstruct_statistics[channel].mean);
}
else
{
distortion[i]+=area*QuantumScale*(Sa*p[i]-
image_statistics[channel].mean)*(Da*GetPixelChannel(
reconstruct_image,channel,q)-
reconstruct_statistics[channel].mean);
}
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows);
if (proceed == MagickFalse)
{
status=MagickFalse;
break;
}
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/*
Divide by the standard deviation.
*/
distortion[CompositePixelChannel]=0.0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
gamma;
PixelChannel channel = GetPixelChannelChannel(image,i);
gamma=image_statistics[channel].standard_deviation*
reconstruct_statistics[channel].standard_deviation;
gamma=PerceptibleReciprocal(gamma);
distortion[i]=QuantumRange*gamma*distortion[i];
/* composite is the RMS of the per-channel correlations */
distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
}
distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
GetImageChannels(image));
/*
Free resources.
*/
reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
reconstruct_statistics);
image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
image_statistics);
return(status);
}
/*
  GetPeakAbsoluteDistortion() computes the peak absolute error (PAE): the
  maximum QuantumScale-normalized (alpha-weighted) channel difference over all
  unmasked pixels.  Per-thread maxima are merged with max, not sum, under the
  critical section.
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
MagickBooleanType
status;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
/* iterate over the larger of the two geometries; virtual views pad edges */
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
j,
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* per-thread maxima; merged under the critical section below */
(void) memset(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* skip pixels excluded by either image's read mask */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
distance=QuantumScale*fabs((double) p[i]-
GetPixelChannel(reconstruct_image,channel,q));
else
distance=QuantumScale*fabs(Sa*p[i]-Da*
GetPixelChannel(reconstruct_image,channel,q));
if (distance > channel_distortion[i])
channel_distortion[i]=distance;
if (distance > channel_distortion[CompositePixelChannel])
channel_distortion[CompositePixelChannel]=distance;
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
for (j=0; j <= MaxPixelChannels; j++)
if (channel_distortion[j] > distortion[j])
distortion[j]=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
/*
  GetPeakSignalToNoiseRatio() derives the per-channel PSNR from the mean
  squared error: 10*log10(1.0) - 10*log10(MSE).  Channels whose MSE is below
  MagickEpsilon (identical content) report infinite PSNR.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (i=0; i <= MaxPixelChannels; i++)
  {
    if (fabs(distortion[i]) < MagickEpsilon)
      {
        distortion[i]=INFINITY;
        continue;
      }
    distortion[i]=10.0*MagickLog10(1.0)-10.0*MagickLog10(distortion[i]);
  }
  return(status);
}
/*
  GetPerceptualHashDistortion() compares the perceptual hashes (image-moment
  signatures) of the two images.  Per channel it sums squared differences of
  the hash coefficients across all colorspaces and moments; the composite slot
  accumulates the per-channel totals under a critical section.  The
  "phash:normalize" artifact selects a normalized variant.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
ChannelPerceptualHash
*channel_phash,
*reconstruct_phash;
const char
*artifact;
MagickBooleanType
normalize;
ssize_t
channel;
/*
Compute perceptual hash in the sRGB colorspace.
*/
channel_phash=GetImagePerceptualHash(image,exception);
if (channel_phash == (ChannelPerceptualHash *) NULL)
return(MagickFalse);
reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
{
channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
channel_phash);
return(MagickFalse);
}
artifact=GetImageArtifact(image,"phash:normalize");
normalize=(artifact == (const char *) NULL) ||
(IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (channel=0; channel < MaxPixelChannels; channel++)
{
double
difference;
register ssize_t
i;
difference=0.0;
for (i=0; i < MaximumNumberOfImageMoments; i++)
{
double
alpha,
beta;
register ssize_t
j;
for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
{
alpha=channel_phash[channel].phash[j][i];
beta=reconstruct_phash[channel].phash[j][i];
if (normalize == MagickFalse)
difference+=(beta-alpha)*(beta-alpha);
else
/* NOTE(review): plain `=` overwrites the accumulated difference, so
only the last colorspace/moment survives in the normalized path --
looks like it should be `+=`; confirm against upstream intent */
difference=sqrt((beta-alpha)*(beta-alpha)/
channel_phash[0].number_channels);
}
}
/* each thread owns a unique channel slot, so no lock needed here */
distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
distortion[CompositePixelChannel]+=difference;
}
/*
Free resources.
*/
reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
reconstruct_phash);
channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
return(MagickTrue);
}
/*
  GetRootMeanSquaredDistortion() computes RMSE: the square root of each
  channel's mean squared error as returned by GetMeanSquaredDistortion().
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (i=MaxPixelChannels; i >= 0; i--)
    distortion[i]=sqrt(distortion[i]);
  return(status);
}
/*
  GetStructuralSimilarityDistortion() computes the structural similarity
  index (SSIM, https://en.wikipedia.org/wiki/Structural_similarity) between
  image and reconstruct_image.  A Gaussian window (radius/sigma tunable via
  the "compare:ssim-radius" and "compare:ssim-sigma" artifacts) slides over
  every pixel; per-window weighted means, variances, and covariance feed the
  standard SSIM formula with stabilizers c1/c2 (tunable via
  "compare:ssim-k1"/"compare:ssim-k2").  Results are averaged over the image
  area, and the composite slot additionally over the channel count.

  Fix: removed a duplicated copy-paste memset that re-cleared
  x_pixel_sigma_squared using sizeof(y_pixel_sigma_squared);
  y_pixel_sigma_squared itself is cleared immediately afterwards, so the
  stray line was redundant and misleading.
*/
static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius  5.0
#define SSIMSigma  1.5
#define SSIMBlocksize  8
#define SSIMK1  0.01
#define SSIMK2  0.03
#define SSIML  1.0

  CacheView
    *image_view,
    *reconstruct_view;

  char
    geometry[MagickPathExtent];

  const char
    *artifact;

  double
    c1,
    c2,
    radius,
    sigma;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Build the Gaussian weighting kernel from (possibly overridden) defaults.
  */
  radius=SSIMRadius;
  artifact=GetImageArtifact(image,"compare:ssim-radius");
  if (artifact != (const char *) NULL)
    radius=StringToDouble(artifact,(char **) NULL);
  sigma=SSIMSigma;
  artifact=GetImageArtifact(image,"compare:ssim-sigma");
  if (artifact != (const char *) NULL)
    sigma=StringToDouble(artifact,(char **) NULL);
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Stabilizing constants c1=(k1*L)^2 and c2=(k2*L)^2.
  */
  c1=pow(SSIMK1*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k1");
  if (artifact != (const char *) NULL)
    c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  c2=pow(SSIMK2*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k2");
  if (artifact != (const char *) NULL)
    c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,reconstruct_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a row plus the kernel's apron so windows centered on edge pixels
      read padded virtual pixels.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y-
      ((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/
      2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        x_pixel_mu[MaxPixelChannels+1],
        x_pixel_sigma_squared[MaxPixelChannels+1],
        xy_sigma[MaxPixelChannels+1],
        y_pixel_mu[MaxPixelChannels+1],
        y_pixel_sigma_squared[MaxPixelChannels+1];

      register const Quantum
        *magick_restrict reference,
        *magick_restrict target;

      register MagickRealType
        *k;

      ssize_t
        v;

      (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu));
      (void) memset(x_pixel_sigma_squared,0,sizeof(x_pixel_sigma_squared));
      (void) memset(xy_sigma,0,sizeof(xy_sigma));
      (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu));
      (void) memset(y_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared));
      k=kernel_info->values;
      reference=p;
      target=q;
      /*
        Accumulate Gaussian-weighted first and second moments over the window.
      */
      for (v=0; v < (ssize_t) kernel_info->height; v++)
      {
        register ssize_t
          u;

        for (u=0; u < (ssize_t) kernel_info->width; u++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              x_pixel,
              y_pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait reconstruct_traits = GetPixelChannelTraits(
              reconstruct_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (reconstruct_traits == UndefinedPixelTrait) ||
                ((reconstruct_traits & UpdatePixelTrait) == 0))
              continue;
            x_pixel=QuantumScale*reference[i];
            x_pixel_mu[i]+=(*k)*x_pixel;
            x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel;
            y_pixel=QuantumScale*
              GetPixelChannel(reconstruct_image,channel,target);
            y_pixel_mu[i]+=(*k)*y_pixel;
            y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel;
            xy_sigma[i]+=(*k)*x_pixel*y_pixel;
          }
          k++;
          reference+=GetPixelChannels(image);
          target+=GetPixelChannels(reconstruct_image);
        }
        reference+=GetPixelChannels(image)*columns;
        target+=GetPixelChannels(reconstruct_image)*columns;
      }
      /*
        Evaluate the SSIM formula for this window, per channel.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          ssim,
          x_pixel_mu_squared,
          x_pixel_sigmas_squared,
          xy_mu,
          xy_sigmas,
          y_pixel_mu_squared,
          y_pixel_sigmas_squared;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i];
        y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i];
        xy_mu=x_pixel_mu[i]*y_pixel_mu[i];
        xy_sigmas=xy_sigma[i]-xy_mu;
        x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared;
        y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared;
        ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/
          ((x_pixel_mu_squared+y_pixel_mu_squared+c1)*
           (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2));
        channel_distortion[i]+=ssim;
        channel_distortion[CompositePixelChannel]+=ssim;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion)
#endif
    for (i=0; i <= MaxPixelChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  image_view=DestroyCacheView(image_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  /*
    Average the accumulated SSIM over the image area (and, for the composite
    slot, over the channel count).
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    distortion[i]/=((double) columns*rows);
  }
  distortion[CompositePixelChannel]/=((double) columns*rows);
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(status);
}
/*
  GetStructuralDisimilarityDistortion() computes DSSIM, defined per channel
  as (1-SSIM)/2, from the structural similarity index.
*/
static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetStructuralSimilarityDistortion(image,reconstruct_image,distortion,
    exception);
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.5*(1.0-distortion[i]);
  return(status);
}
/*
  GetImageDistortion() dispatches to the metric-specific helper, stores the
  composite-channel result in *distortion, and records it as the image's
  "distortion" property.  An unrecognized metric falls through to normalized
  cross-correlation (the `default` label shares the NCC case).
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
const Image *reconstruct_image,const MetricType metric,double *distortion,
ExceptionInfo *exception)
{
double
*channel_distortion;
MagickBooleanType
status;
size_t
length;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
Get image distortion.
*/
/* one slot per channel plus the composite slot */
length=MaxPixelChannels+1;
channel_distortion=(double *) AcquireQuantumMemory(length,
sizeof(*channel_distortion));
if (channel_distortion == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(channel_distortion,0,length*
sizeof(*channel_distortion));
switch (metric)
{
case AbsoluteErrorMetric:
{
status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
exception);
break;
}
case FuzzErrorMetric:
{
status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
exception);
break;
}
case MeanAbsoluteErrorMetric:
{
status=GetMeanAbsoluteDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case MeanErrorPerPixelErrorMetric:
{
status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
exception);
break;
}
case MeanSquaredErrorMetric:
{
status=GetMeanSquaredDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case NormalizedCrossCorrelationErrorMetric:
default:
{
status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PeakAbsoluteErrorMetric:
{
status=GetPeakAbsoluteDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PeakSignalToNoiseRatioErrorMetric:
{
status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PerceptualHashErrorMetric:
{
status=GetPerceptualHashDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case RootMeanSquaredErrorMetric:
{
status=GetRootMeanSquaredDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case StructuralSimilarityErrorMetric:
{
status=GetStructuralSimilarityDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case StructuralDissimilarityErrorMetric:
{
status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
}
/* report only the composite (all-channel) distortion to the caller */
*distortion=channel_distortion[CompositePixelChannel];
channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
(void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
*distortion);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDistortions() returns a newly allocated array of per-channel
  distortions (MaxPixelChannels+1 doubles; the last slot is the composite
  distortion) for the requested metric, or NULL on failure.  The caller is
  responsible for freeing the result with RelinquishMagickMemory().

  Fix: the PerceptualHashErrorMetric case mistakenly called
  GetRootMeanSquaredDistortion() (duplicating the RootMeanSquaredErrorMetric
  case); it now calls GetPerceptualHashDistortion(), consistent with
  GetImageDistortion().
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion: one slot per channel plus the composite slot.
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compare the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  IsImagesEqual() compares the two images pixel by pixel and returns
  MagickTrue only if every updatable channel differs by less than
  MagickEpsilon everywhere.  It short-circuits on the first differing
  channel via the cascading i/x/y loop-exit checks; a failed pixel fetch
  also breaks out and reports MagickFalse.
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
const Image *reconstruct_image,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
/* iterate over the larger of the two geometries; virtual views pad edges */
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
channel,q));
if (distance >= MagickEpsilon)
break;
}
/* a premature exit of the channel loop means a mismatch was found */
if (i < (ssize_t) GetPixelChannels(image))
break;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
/* propagate the mismatch (or fetch failure) outward */
if (x < (ssize_t) columns)
break;
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* equal iff every row was fully scanned without an early break */
return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Accumulate per-channel absolute differences between the two images and
    store the mean / normalized-mean / normalized-maximum errors in
    image->error.  Returns MagickTrue when the images match exactly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;  /* one sample per compared channel */
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Guard against a zero sample count (no updatable channels, zero-sized
    image, or a failed pixel read on the first row): all accumulators are
    0.0 in that case, so dividing by 1.0 stores 0.0 instead of NaN.
  */
  if (area == 0.0)
    area=1.0;
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImageImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *crop_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  /*
    Crop a reference-sized window at (x_offset,y_offset) out of image and
    measure its distortion against the reference; 0.0 is returned when the
    crop or the distortion computation fails.
  */
  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  crop_image=CropImage(image,&geometry,exception);
  if (crop_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(crop_image,reference,metric,&distortion,exception);
  crop_image=DestroyImage(crop_image);
  return(status == MagickFalse ? 0.0 : distortion);
}
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  /*
    The similarity image holds one pixel per candidate (x,y) offset.
  */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;  /* another thread already found a good-enough match */
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /*
        Invert correlation-style metrics so smaller always means a better
        match; this only touches the thread-local value.
      */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
      /*
        The read-compare-update of the shared best match must be atomic.
        A `critical` directive binds only to the single statement that
        follows it; previously it guarded the metric-inversion `if` above
        and left this block racing on *similarity_metric and *offset, so
        the whole update is braced under the critical section here.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      {
        if (similarity < *similarity_metric)
          {
            offset->x=x;
            offset->y=y;
            *similarity_metric=similarity;
          }
      }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      /*
        Store the match quality as a gray level: white = exact match.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
mmc.c | /*******************************************************************
*
* M4RI: Linear Algebra over GF(2)
*
* Copyright (C) 2007, 2008 Gregory Bard <bard@fordham.edu>
* Copyright (C) 2008 Martin Albrecht <M.R.Albrecht@rhul.ac.uk>
*
* Distributed under the terms of the GNU General Public License (GPL)
* version 2 or higher.
*
* This code is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full text of the GPL is available at:
*
* http://www.gnu.org/licenses/
*
********************************************************************/
#ifdef M4RI_HAVE_CONFIG_H
#include "config.h"
#endif
#include "m4ri/mmc.h"
#if __M4RI_ENABLE_MMC
/**
* The actual memory block cache.
*/
mmb_t m4ri_mmc_cache[__M4RI_MMC_NBLOCKS];
#endif // __M4RI_ENABLE_MMC
/**
* \brief Allocate size bytes.
*
* \param size Number of bytes.
*
* \return pointer to allocated memory block.
*/
void *m4ri_mmc_malloc(size_t size) {
#if __M4RI_ENABLE_MMC
  void *ret = NULL;
#if __M4RI_HAVE_OPENMP
#pragma omp critical (mmc)
 {
#endif
  mmb_t *mm = m4ri_mmc_cache;
  /*
   * Only blocks strictly below the threshold are ever stored by
   * m4ri_mmc_free() (it tests `size < __M4RI_MMC_THRESHOLD`), so use the
   * same strict comparison here; the previous `<=` scanned the cache for a
   * size that can never be present.
   */
  if (size < __M4RI_MMC_THRESHOLD) {
    for (int i = 0; i < __M4RI_MMC_NBLOCKS; ++i) {
      if(mm[i].size == size) {
        /* Cache hit: hand out the stored block and empty the slot. */
        ret = mm[i].data;
        mm[i].data = NULL;
        mm[i].size = 0;
        break;
      }
    }
  }
#if __M4RI_HAVE_OPENMP
 }
#endif
  /* Fall back to a fresh allocation on a cache miss. */
  if (ret)
    return ret;
  else
    return m4ri_mm_malloc(size);
#else // __M4RI_ENABLE_MMC
  return m4ri_mm_malloc(size);
#endif // __M4RI_ENABLE_MMC
}
/**
* \brief Free the data pointed to by condemned of the given size.
*
* \param condemned Pointer to memory.
* \param size Number of bytes.
*/
void m4ri_mmc_free(void *condemned, size_t size) {
#if __M4RI_ENABLE_MMC
#if __M4RI_HAVE_OPENMP
#pragma omp critical (mmc)
 {
#endif
  /* Round-robin eviction cursor; persists across calls.  NOTE(review):
   * without OpenMP this static is unsynchronized -- confirm callers are
   * single-threaded in that configuration. */
  static int j = 0;
  mmb_t *mm = m4ri_mmc_cache;
  if (size < __M4RI_MMC_THRESHOLD) {
    /* Small block: park it in the first empty cache slot... */
    for(int i = 0; i < __M4RI_MMC_NBLOCKS; ++i) {
      if(mm[i].size == 0) {
        mm[i].size = size;
        mm[i].data = condemned;
        goto done;
      }
    }
    /* ...or, if the cache is full, evict slot j and advance the cursor. */
    m4ri_mm_free(mm[j].data);
    mm[j].size = size;
    mm[j].data = condemned;
    j = (j+1) % __M4RI_MMC_NBLOCKS;
  } else {
    /* Large block: not worth caching, release it immediately. */
    m4ri_mm_free(condemned);
  }
 done:
  ;
#if __M4RI_HAVE_OPENMP
 }
#endif // __M4RI_HAVE_OPENMP
#else // __M4RI_ENABLE_MMC
  /* MMC disabled at build time: plain free. */
  m4ri_mm_free(condemned);
#endif // __M4RI_ENABLE_MMC
}
/**
* \brief Cleans up memory block cache.
*
* This function is called automatically when the shared library is unloaded.
*
* \warning Not thread safe.
*/
void m4ri_mmc_cleanup(void) {
#if __M4RI_ENABLE_MMC
#if __M4RI_HAVE_OPENMP
#pragma omp critical (mmc)
 {
#endif
  mmb_t *mm = m4ri_mmc_cache;
  /* Release every cached block and mark its slot empty so the cache can
   * be reused after cleanup. */
  for(int i = 0; i < __M4RI_MMC_NBLOCKS; ++i) {
    if (mm[i].size)
      m4ri_mm_free(mm[i].data);
    mm[i].size = 0;
  }
#if __M4RI_HAVE_OPENMP
 }
#endif // __M4RI_HAVE_OPENMP
#endif // __M4RI_ENABLE_MMC
}
|
interppotential_calc_potential.c | /*
C code for calculating a potential and its forces on a grid
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#define CHUNKSIZE 1
//Potentials
#include <galpy_potentials.h>
#include <actionAngle.h>
#include <integrateFullOrbit.h>
#include <interp_2d.h>
#include <cubic_bspline_2d_coeffs.h>
/*
MAIN FUNCTIONS
*/
void calc_potential(int nR,
                    double *R,
                    int nz,
                    double *z,
                    int npot,
                    int * pot_type,
                    double * pot_args,
                    double *out,
                    int * err){
  //Tabulate the potential on the (R,z) grid, one R-row per loop iteration.
  int ir, iz, tid, nthreads;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#else
  nthreads = 1;
#endif
  //Scratch space: thread t writes into row[t*nz .. t*nz+nz-1]
  double * row= (double *) malloc ( nthreads * nz * ( sizeof ( double ) ) );
  //Set up the potentials
  struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) );
  parse_actionAngleArgs(npot,potentialArgs,pot_type,pot_args);
  //Run through the grid and calculate
  UNUSED int chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk) private(ir,tid,iz) \
  shared(row,npot,potentialArgs,R,z,nR,nz)
  for (ir=0; ir < nR; ir++){
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    for (iz=0; iz < nz; iz++)
      row[iz+tid*nz]= evaluatePotentials(R[ir],z[iz],npot,potentialArgs);
    put_row(out,ir,row+tid*nz,nz);
  }
  //Clean up the per-potential argument buffers and the scratch space
  for (ir=0; ir < npot; ir++)
    free(potentialArgs[ir].args);
  free(potentialArgs);
  free(row);
}
void calc_rforce(int nR,
                 double *R,
                 int nz,
                 double *z,
                 int npot,
                 int * pot_type,
                 double * pot_args,
                 double *out,
                 int * err){
  //Tabulate the radial force on the (R,z) grid, one R-row per iteration.
  int ir, iz, tid, nthreads;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#else
  nthreads = 1;
#endif
  //Scratch space: one nz-sized row per thread
  double * row= (double *) malloc ( nthreads * nz * ( sizeof ( double ) ) );
  //Set up the potentials
  struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) );
  parse_leapFuncArgs_Full(npot,potentialArgs,pot_type,pot_args);
  //Run through the grid and calculate
  UNUSED int chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk) private(ir,tid,iz) \
  shared(row,npot,potentialArgs,R,z,nR,nz)
  for (ir=0; ir < nR; ir++){
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    for (iz=0; iz < nz; iz++)
      row[iz+tid*nz]= calcRforce(R[ir],z[iz],0.,0.,npot,potentialArgs);
    put_row(out,ir,row+tid*nz,nz);
  }
  //Clean up the per-potential argument buffers and the scratch space
  for (ir=0; ir < npot; ir++)
    free(potentialArgs[ir].args);
  free(potentialArgs);
  free(row);
}
void calc_zforce(int nR,
                 double *R,
                 int nz,
                 double *z,
                 int npot,
                 int * pot_type,
                 double * pot_args,
                 double *out,
                 int * err){
  //Tabulate the vertical force on the (R,z) grid, one R-row per iteration.
  int ir, iz, tid, nthreads;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#else
  nthreads = 1;
#endif
  //Scratch space: one nz-sized row per thread
  double * row= (double *) malloc ( nthreads * nz * ( sizeof ( double ) ) );
  //Set up the potentials
  struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) );
  parse_leapFuncArgs_Full(npot,potentialArgs,pot_type,pot_args);
  //Run through the grid and calculate
  UNUSED int chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk) private(ir,tid,iz) \
  shared(row,npot,potentialArgs,R,z,nR,nz)
  for (ir=0; ir < nR; ir++){
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    for (iz=0; iz < nz; iz++)
      row[iz+tid*nz]= calczforce(R[ir],z[iz],0.,0.,npot,potentialArgs);
    put_row(out,ir,row+tid*nz,nz);
  }
  //Clean up the per-potential argument buffers and the scratch space
  for (ir=0; ir < npot; ir++)
    free(potentialArgs[ir].args);
  free(potentialArgs);
  free(row);
}
void eval_potential(int nR,
                    double *R,
                    double *z,
                    int npot,
                    int * pot_type,
                    double * pot_args,
                    double *out,
                    int * err){
  //Evaluate the potential at each (R[i],z[i]) pair.
  int ii;
  //Set up the potentials
  struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) );
  parse_actionAngleArgs(npot,potentialArgs,pot_type,pot_args);
  //Run through and evaluate
  for (ii=0; ii < nR; ii++)
    out[ii]= evaluatePotentials(R[ii],z[ii],npot,potentialArgs);
  //Tear down: free interpolation tables/accelerators when present
  for (ii=0; ii < npot; ii++) {
    struct potentialArg *pa= potentialArgs+ii;
    if ( pa->i2d )
      interp_2d_free(pa->i2d) ;
    if ( pa->accx )
      gsl_interp_accel_free (pa->accx);
    if ( pa->accy )
      gsl_interp_accel_free (pa->accy);
    free(pa->args);
  }
  free(potentialArgs);
}
void eval_rforce(int nR,
                 double *R,
                 double *z,
                 int npot,
                 int * pot_type,
                 double * pot_args,
                 double *out,
                 int * err){
  //Evaluate the radial force at each (R[i],z[i]) pair.
  int ii;
  //Set up the potentials
  struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) );
  parse_leapFuncArgs_Full(npot,potentialArgs,pot_type,pot_args);
  //Run through and evaluate
  for (ii=0; ii < nR; ii++)
    out[ii]= calcRforce(R[ii],z[ii],0.,0.,npot,potentialArgs);
  //Tear down: free force interpolation tables/accelerators when present
  for (ii=0; ii < npot; ii++) {
    struct potentialArg *pa= potentialArgs+ii;
    if ( pa->i2drforce )
      interp_2d_free(pa->i2drforce) ;
    if ( pa->accxrforce )
      gsl_interp_accel_free (pa->accxrforce );
    if ( pa->accyrforce )
      gsl_interp_accel_free (pa->accyrforce );
    if ( pa->i2dzforce )
      interp_2d_free(pa->i2dzforce) ;
    if ( pa->accxzforce )
      gsl_interp_accel_free (pa->accxzforce );
    if ( pa->accyzforce )
      gsl_interp_accel_free (pa->accyzforce );
    free(pa->args);
  }
  free(potentialArgs);
}
void eval_zforce(int nR,
                 double *R,
                 double *z,
                 int npot,
                 int * pot_type,
                 double * pot_args,
                 double *out,
                 int * err){
  //Evaluate the vertical force at each (R[i],z[i]) pair.
  int ii;
  //Set up the potentials
  struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) );
  parse_leapFuncArgs_Full(npot,potentialArgs,pot_type,pot_args);
  //Run through and evaluate
  for (ii=0; ii < nR; ii++)
    out[ii]= calczforce(R[ii],z[ii],0.,0.,npot,potentialArgs);
  //Tear down: free force interpolation tables/accelerators when present
  for (ii=0; ii < npot; ii++) {
    struct potentialArg *pa= potentialArgs+ii;
    if ( pa->i2drforce )
      interp_2d_free(pa->i2drforce) ;
    if ( pa->accxrforce )
      gsl_interp_accel_free (pa->accxrforce );
    if ( pa->accyrforce )
      gsl_interp_accel_free (pa->accyrforce );
    if ( pa->i2dzforce )
      interp_2d_free(pa->i2dzforce) ;
    if ( pa->accxzforce )
      gsl_interp_accel_free (pa->accxzforce );
    if ( pa->accyzforce )
      gsl_interp_accel_free (pa->accyzforce );
    free(pa->args);
  }
  free(potentialArgs);
}
|
vednnConvolutionBackwardFilter.c | #include "vednnConvolutionBackwardFilter.h"
#include "vednn-def.h"
#include <stdint.h>
#include <stdio.h>
/* Dispatch a low-level backward-filter kernel, optionally splitting the work
 * across OpenMP threads.  Without VEDNN_USE_OPENMP the kernel is invoked
 * once over the full range.  NOTE(review): the `reduction(|:rc)` assumes
 * vednnError_t values compose by bitwise OR -- confirm against vednn-def.h. */
static inline vednnError_t
vednnConvolutionBackwardFilter_wrapper(
    vednnConvBackwardFilter_t pFunc,
    VEDNN_CONVBKF_ARGS )
{
#ifndef VEDNN_USE_OPENMP
  return pFunc( VEDNN_CONVBKF_ARGS_LIST );
#else // VEDNN_USE_OPENMP
#ifndef VEDNN_OMP_GROUP_PARALLEL
  /* Parallelize over the output channels of a group only. */
  if ( __vednn_omp_num_threads == 1 ) {
    /* Single thread: process every output channel of the group. */
    int64_t gOutChannel = pParamGradOut->channel;
    int64_t group = pParamConv->group;
    int64_t gOutChannelGroup = gOutChannel / group;
    return pFunc(VEDNN_CONVBKF_ARGS_LIST, 0, gOutChannelGroup);
  }
  else {
    vednnError_t rc = VEDNN_SUCCESS ;
#pragma omp parallel reduction(|:rc)
    {
      int64_t nthreads = omp_get_num_threads() ;
      int64_t threadid = omp_get_thread_num() ;
      int64_t gOutChannel = pParamGradOut->channel;
      int64_t group = pParamConv->group;
      int64_t gOutChannelGroup = gOutChannel / group;
      /* Base share per thread; the first `remain` threads take one extra
       * channel so the whole range is covered. */
      int64_t nOChannlel = gOutChannelGroup / nthreads ;
      int64_t remain = gOutChannelGroup % nthreads ;
      int64_t beginOChannel = nOChannlel * threadid + ( threadid < remain ? threadid : remain ) ;
      int64_t myOChannel = nOChannlel + ( threadid < remain ? 1 : 0 ) ;
      if( myOChannel == 0 ) {
        /* More threads than channels: this thread has no work. */
        rc |= VEDNN_SUCCESS ;
      }
      else {
        rc |= pFunc(VEDNN_CONVBKF_ARGS_LIST, beginOChannel, myOChannel );
      }
    }
    return rc ;
  }
#else // VEDNN_OMP_GROUP_PARALLEL
  /* Group-aware variant: the kernel also takes a (beginGroup, nGroup)
   * range, so work can be split over groups instead of channels. */
  if ( __vednn_omp_num_threads == 1 ) {
    int64_t gOutChannel = pParamGradOut->channel;
    int64_t group = pParamConv->group;
    int64_t gOutChannelGroup = gOutChannel / group;
    return pFunc(VEDNN_CONVBKF_ARGS_LIST, 0, gOutChannelGroup, 0, group);
  }
  else {
    vednnError_t rc = VEDNN_SUCCESS ;
#pragma omp parallel reduction(|:rc)
    {
      int64_t nthreads = omp_get_num_threads() ;
      int64_t threadid = omp_get_thread_num() ;
      int64_t gOutChannel = pParamGradOut->channel;
      int64_t group = pParamConv->group;
      int64_t gOutChannelGroup = gOutChannel / group;
      if( gOutChannelGroup >= group )
      {
        /* More channels per group than groups: split over channels. */
        int64_t nOChannlel = gOutChannelGroup / nthreads ;
        int64_t remain = gOutChannelGroup % nthreads ;
        int64_t beginOChannel = nOChannlel * threadid + ( threadid < remain ? threadid : remain ) ;
        int64_t myOChannel = nOChannlel + ( threadid < remain ? 1 : 0 ) ;
        if( myOChannel == 0 ) {
          rc |= VEDNN_SUCCESS ;
        }
        else {
          rc |= pFunc(VEDNN_CONVBKF_ARGS_LIST, beginOChannel, myOChannel, 0, group);
        }
      }
      else {
        /* Otherwise split the groups themselves across threads. */
        int64_t nGroup = group / nthreads ;
        int64_t remain = group % nthreads ;
        int64_t beginGroup = nGroup * threadid + ( threadid < remain ? threadid : remain ) ;
        int64_t myGroup = nGroup + ( threadid < remain ? 1 : 0 ) ;
        if( myGroup == 0 ) {
          rc |= VEDNN_SUCCESS ;
        }
        else {
          rc |= pFunc(VEDNN_CONVBKF_ARGS_LIST, 0, gOutChannelGroup, beginGroup, myGroup);
        }
      }
    }
    return rc ;
  }
#endif // VEDNN_OMP_GROUP_PARALLEL
#endif // VEDNN_USE_OPENMP
}
/* ----------------------------------------------------------------------- */
/* Public entry point: validate parameters and dispatch to the most
 * specialized direct backward-filter kernel that matches the convolution
 * geometry.  Every OMPWRAP(...) below expands to a return statement. */
vednnError_t vednnConvolutionBackwardFilter(
    const vednnTensorParam_t *pParamIn,
    const void *pDataIn,
    const vednnTensorParam_t *pParamGradOut,
    const void *pDataGradOut,
    const vednnFilterParam_t *pParamGradKernel,
    void *pDataGradKernel,
    const vednnConvolutionParam_t *pParamConv,
    vednnConvolutionAlgorithm_t algo
)
{
  /* Filter layout check: NCHW is fully supported; HWCN only without
   * grouping. */
  switch( pParamGradKernel->layout ) {
  case VEDNN_FILTER_LAYOUT_NCHW :
    break ;
  case VEDNN_FILTER_LAYOUT_HWCN :
    if( pParamConv->group > 1 ) {
      fprintf(stderr, "[VEDNN ERROR] VEDNN does not support grouped convolution with filter_hwcn\n") ;
      return VEDNN_ERROR_INVALID_PARAM ;
    }
    break ;
  default :
    fprintf(stderr, "[VEDNN ERROR] Unknown Filter Layout %d\n", pParamGradKernel->layout) ;
    return VEDNN_ERROR_INVALID_PARAM ;
  }
  if (algo == VEDNN_CONV_ALGORITHM_DIRECT)
  {
/* OMPWRAP(IMPL): return the named specialization routed through the
 * OpenMP wrapper above. */
#define OMPWRAP( IMPL ) WRAP_RET(vednnConvolutionBackwardFilter_direct_##IMPL, \
    vednnConvolutionBackwardFilter_wrapper, VEDNN_CONVBKF_ARGS_LIST )
/* Geometry predicates:
 *   DIL/PAD/STR(N): dilation/padding/stride == N in both dimensions
 *   KER(N): square N x N kernel
 *   IWU/OWU(N): input/output width <= N; OHWU(N): output area <= N */
#define DIL(N) (pParamConv->dilationHeight == (N) && pParamConv->dilationWidth == (N))
#define PAD(N) (pParamConv->padHeight == (N) && pParamConv->padWidth == (N))
#define STR(N) (pParamConv->strideHeight == (N) && pParamConv->strideWidth == (N))
#define KER(N) (pParamGradKernel->width == (N) && pParamGradKernel->height == (N))
#define IWU(N) (pParamIn->width <= (N))
#define OWU(N) (pParamGradOut->width <= (N))
#define OHWU(N) (pParamGradOut->width * pParamGradOut->height <= (N))
    /* Tiny output maps (or maps smaller than the per-group channel count):
     * use the vectorize-over-channels kernel. */
    if ( pParamGradOut->height * pParamGradOut->width <= 16 ||
         ( pParamGradOut->height * pParamGradOut->width < 64
           && pParamGradOut->height * pParamGradOut->width < pParamIn->channel / pParamConv->group ) ) {
      OMPWRAP(vecC);
    }else if (STR(1) && DIL(1)
              && pParamIn->height == pParamGradOut->height
              && pParamIn->width == pParamGradOut->width ) // d1s1pS
    {
      /* Dilation 1, stride 1, "same" padding: pick by kernel size, then by
       * output size; the unconditional OMPWRAP below is the branch default. */
      if (KER(3)) {
        if (OHWU(256)) OMPWRAP(dil1_str1_padsame_ker3_ohwU256);
        else if (OWU(128)) OMPWRAP(dil1_str1_padsame_ker3_owU128);
        else OMPWRAP(dil1_str1_padsame_ker3);
      }else if (KER(1)) { OMPWRAP(dil1_str1_padsame_ker1);
      }else if (KER(5)) {
        if (OWU(128)) OMPWRAP(dil1_str1_padsame_ker5_owU128);
        else OMPWRAP(dil1_str1_padsame_ker5);
      }else if (KER(2)) {
        if (OWU(128)) OMPWRAP(dil1_str1_padsame_ker2_owU128);
        else OMPWRAP(dil1_str1_padsame_ker2);
      }
      OMPWRAP(dil1_str1_padsame);
    }
    else if (DIL(1) && PAD(0)
             && pParamGradOut->height == (pParamIn->height - pParamGradKernel->height) / pParamConv->strideHeight + 1
             && pParamGradOut->width == (pParamIn->width - pParamGradKernel->width) / pParamConv->strideWidth + 1 )
    { // d1p0 and oh,ow correct for whatever stride
      /* The first case additionally requires even widths and 8-byte
       * aligned input/grad-output pointers. */
      if (KER(3) && STR(1) && IWU(256)
          && (pParamIn->width & 0x01) == 0 && (pParamGradOut->width & 0x01) == 0
          && (((uint64_t)pDataIn) & 0x07) == 0 && (((uint64_t)pDataGradOut) & 0x07) == 0 )
      {
        OMPWRAP(dil1_str1_pad0_ker3_ow2X_iw2XU256_igoaligned);
      }
      else if (KER(3) && OWU(128))
      {
        if (STR(1)) OMPWRAP(dil1_str1_pad0_ker3_owU128);
        else OMPWRAP(dil1_pad0_ker3_owU128);
      }
      else if (KER(1))
      {
        if (OHWU(64)) OMPWRAP(dil1_pad0_ker1_ohwU64);
        else if (OHWU(128)) OMPWRAP(dil1_pad0_ker1_ohwU128);
        else if (OWU(32)) OMPWRAP(dil1_pad0_ker1_owU32);
        else OMPWRAP(dil1_pad0_ker1);
      }
      else if (KER(4) && OWU(128) && STR(1)) OMPWRAP(dil1_str1_pad0_ker4_owU128);
      else if (OWU(32)) OMPWRAP(dil1_pad0_owU32);
      OMPWRAP(dil1_pad0);
    }
    else if(OWU(128))
    {
      /* Narrow outputs with otherwise arbitrary geometry. */
      if (KER(3)) {
        if (STR(2) && DIL(1) && PAD(1)) OMPWRAP(dil1_str2_pad1_ker3_owU128) ;
        else OMPWRAP(ker3_owU128) ;
      }else if (KER(4) && STR(2) && DIL(1) && PAD(1))
        OMPWRAP(dil1_str2_pad1_ker4_owU128) ;
      OMPWRAP(owU128);
    }
    /* Fully generic fallback. */
    OMPWRAP(default);
  }
  else {
    return VEDNN_ERROR_INVALID_PARAM ;
  }
#undef OHWU
#undef OWU
#undef IWU
#undef KER
#undef STR
#undef PAD
#undef DIL
#undef OMPWRAP
}
// vim: et sw=2 ts=2
|
convolution_3x3_pack8to4_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd43_transform_kernel_pack8to4_int8_msa(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
// winograd43 transform kernel
Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);
const short ktm[6][3] = {
{6, 0, 0},
{-4, -4, -4},
{-4, 4, -4},
{1, 2, 4},
{1, -2, 4},
{0, 0, 6}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h
short tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 4b-8a-inch/8a-36-outch/4b
kernel_tm_pack8.create(inch / 8, 36, outch / 4, (size_t)2u * 32, 32);
int q = 0;
for (; q + 3 < outch; q += 4)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
Mat kernel_tm = kernel_tm_pack8.channel(q / 4);
for (int k = 0; k < 36; k++)
{
short* g00 = kernel_tm.row<short>(k);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int i = 0; i < 8; i++)
{
const short* k00 = k0.row<const short>(p + i);
const short* k10 = k1.row<const short>(p + i);
const short* k20 = k2.row<const short>(p + i);
const short* k30 = k3.row<const short>(p + i);
g00[0] = k00[k];
g00[1] = k10[k];
g00[2] = k20[k];
g00[3] = k30[k];
g00 += 4;
}
}
}
}
}
// Winograd F(4,3) 3x3 stride-1 int8 convolution for MIPS MSA.
// Input is packed 8 channels per element (int8), output packed 4 channels
// per element (int32, un-requantized). kernel_tm holds the pre-transformed
// weights. Pipeline: pad -> 6x6 input transform -> per-tile GEMM ("dot")
// -> 4x4 output transform -> crop padding.
static void conv3x3s1_winograd43_pack8to4_int8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
// size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
// Round the output up to a multiple of 4 (one F(4,3) tile covers 4x4
// outputs); the input then needs 2 extra border pixels for the 3x3 taps.
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
// 36 coefficients per tile, 16-bit storage (2u * elempack bytes/element).
bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
// Per-tile scratch: 6 rows x 6 cols x 8 channels, after the row pass.
short tmp[6][6][8];
// tile
for (int i = 0; i < h_tm / 6; i++)
{
for (int j = 0; j < w_tm / 6; j++)
{
const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8;
// Row pass: widen int8 -> int16 (sign via __msa_clti_s_b mask +
// interleave) and apply the 6-point transform to each of 6 rows.
for (int m = 0; m < 6; m++)
{
v16i8 _r00_01 = __msa_ld_b(r0, 0);
v16i8 _r02_03 = __msa_ld_b(r0 + 16, 0);
v16i8 _r04_05 = __msa_ld_b(r0 + 32, 0);
v16i8 _extr0001 = __msa_clti_s_b(_r00_01, 0);
v16i8 _extr0203 = __msa_clti_s_b(_r02_03, 0);
v16i8 _extr0405 = __msa_clti_s_b(_r04_05, 0);
v8i16 _r00 = (v8i16)__msa_ilvr_b(_extr0001, _r00_01);
v8i16 _r01 = (v8i16)__msa_ilvl_b(_extr0001, _r00_01);
v8i16 _r02 = (v8i16)__msa_ilvr_b(_extr0203, _r02_03);
v8i16 _r03 = (v8i16)__msa_ilvl_b(_extr0203, _r02_03);
v8i16 _r04 = (v8i16)__msa_ilvr_b(_extr0405, _r04_05);
v8i16 _r05 = (v8i16)__msa_ilvl_b(_extr0405, _r04_05);
v8i16 _v5 = __msa_fill_h(5);
v8i16 _tmp0m = __msa_subv_h(__msa_addv_h(__msa_slli_h(_r00, 2), _r04), __msa_mulv_h(_r02, _v5));
v8i16 _tmp1m = __msa_subv_h(__msa_addv_h(_r04, _r03), __msa_slli_h(__msa_addv_h(_r01, _r02), 2));
v8i16 _tmp2m = __msa_addv_h(__msa_subv_h(_r04, _r03), __msa_slli_h(__msa_subv_h(_r01, _r02), 2));
v8i16 _tmp3m = __msa_subv_h(__msa_subv_h(_r04, _r02), __msa_slli_h(__msa_subv_h(_r01, _r03), 1));
v8i16 _tmp4m = __msa_addv_h(__msa_subv_h(_r04, _r02), __msa_slli_h(__msa_subv_h(_r01, _r03), 1));
v8i16 _tmp5m = __msa_subv_h(__msa_addv_h(__msa_slli_h(_r01, 2), _r05), __msa_mulv_h(_r03, _v5));
__msa_st_h(_tmp0m, tmp[0][m], 0);
__msa_st_h(_tmp1m, tmp[1][m], 0);
__msa_st_h(_tmp2m, tmp[2][m], 0);
__msa_st_h(_tmp3m, tmp[3][m], 0);
__msa_st_h(_tmp4m, tmp[4][m], 0);
__msa_st_h(_tmp5m, tmp[5][m], 0);
r0 += w * 8;
}
// Column pass: apply the same transform to tmp and scatter the 36
// coefficients across the 36 planes of img0_tm (stride tiles*8).
short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8;
short* r0_tm_1 = r0_tm_0 + tiles * 8;
short* r0_tm_2 = r0_tm_0 + tiles * 16;
short* r0_tm_3 = r0_tm_0 + tiles * 24;
short* r0_tm_4 = r0_tm_0 + tiles * 32;
short* r0_tm_5 = r0_tm_0 + tiles * 40;
for (int m = 0; m < 6; m++)
{
v8i16 _tmp00 = __msa_ld_h(tmp[m][0], 0);
v8i16 _tmp01 = __msa_ld_h(tmp[m][1], 0);
v8i16 _tmp02 = __msa_ld_h(tmp[m][2], 0);
v8i16 _tmp03 = __msa_ld_h(tmp[m][3], 0);
v8i16 _tmp04 = __msa_ld_h(tmp[m][4], 0);
v8i16 _tmp05 = __msa_ld_h(tmp[m][5], 0);
v8i16 _v5 = __msa_fill_h(5);
v8i16 _r0tm0 = __msa_subv_h(__msa_addv_h(__msa_slli_h(_tmp00, 2), _tmp04), __msa_mulv_h(_tmp02, _v5));
v8i16 _r0tm1 = __msa_subv_h(__msa_addv_h(_tmp04, _tmp03), __msa_slli_h(__msa_addv_h(_tmp01, _tmp02), 2));
v8i16 _r0tm2 = __msa_addv_h(__msa_subv_h(_tmp04, _tmp03), __msa_slli_h(__msa_subv_h(_tmp01, _tmp02), 2));
v8i16 _r0tm3 = __msa_subv_h(__msa_subv_h(_tmp04, _tmp02), __msa_slli_h(__msa_subv_h(_tmp01, _tmp03), 1));
v8i16 _r0tm4 = __msa_addv_h(__msa_subv_h(_tmp04, _tmp02), __msa_slli_h(__msa_subv_h(_tmp01, _tmp03), 1));
v8i16 _r0tm5 = __msa_subv_h(__msa_addv_h(__msa_slli_h(_tmp01, 2), _tmp05), __msa_mulv_h(_tmp03, _v5));
__msa_st_h(_r0tm0, r0_tm_0, 0);
__msa_st_h(_r0tm1, r0_tm_1, 0);
__msa_st_h(_r0tm2, r0_tm_2, 0);
__msa_st_h(_r0tm3, r0_tm_3, 0);
__msa_st_h(_r0tm4, r0_tm_4, 0);
__msa_st_h(_r0tm5, r0_tm_5, 0);
r0_tm_0 += tiles * 48;
r0_tm_1 += tiles * 48;
r0_tm_2 += tiles * 48;
r0_tm_3 += tiles * 48;
r0_tm_4 += tiles * 48;
r0_tm_5 += tiles * 48;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
// Repack the transformed input so tiles processed together (pairs when
// possible) are contiguous per coefficient plane r.
Mat bottom_blob_tm2;
if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 1 < tiles; i += 2)
{
short* tmpptr = tm2.row<short>(i / 2);
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
v8i16 _r0 = __msa_ld_h(r0, 0);
v8i16 _r1 = __msa_ld_h(r0 + 8, 0);
__msa_st_h(_r0, tmpptr, 0);
__msa_st_h(_r1, tmpptr + 8, 0);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 16;
}
}
for (; i < tiles; i++)
{
short* tmpptr = tm2.row<short>(i / 2 + i % 2);
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
v8i16 _r0 = __msa_ld_h(r0, 0);
__msa_st_h(_r0, tmpptr, 0);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 8;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u * 4, 4, opt.workspace_allocator);
// GEMM: for every output channel p and coefficient plane r, accumulate
// int16 x int16 -> int32 over inch, two tiles at a time when possible.
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 1 < tiles; i += 2)
{
const short* r0 = bb2.row<const short>(i / 2);
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
v4i32 _sum0 = __msa_fill_w(0);
v4i32 _sum1 = __msa_fill_w(0);
v4i32 _sum2 = __msa_fill_w(0);
v4i32 _sum3 = __msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 64);
__builtin_prefetch(k0 + 128);
// Weights: sign-extend 16 -> 32 via compare mask + interleave.
v8i16 _w0 = __msa_ld_h(k0, 0);
v8i16 _w1 = __msa_ld_h(k0 + 8, 0);
v8i16 _w2 = __msa_ld_h(k0 + 16, 0);
v8i16 _w3 = __msa_ld_h(k0 + 24, 0);
v8i16 _extw0 = __msa_clti_s_h(_w0, 0);
v8i16 _extw1 = __msa_clti_s_h(_w1, 0);
v8i16 _extw2 = __msa_clti_s_h(_w2, 0);
v8i16 _extw3 = __msa_clti_s_h(_w3, 0);
v4i32 _w0l = (v4i32)__msa_ilvr_h(_extw0, _w0);
v4i32 _w0h = (v4i32)__msa_ilvl_h(_extw0, _w0);
v4i32 _w1l = (v4i32)__msa_ilvr_h(_extw1, _w1);
v4i32 _w1h = (v4i32)__msa_ilvl_h(_extw1, _w1);
v4i32 _w2l = (v4i32)__msa_ilvr_h(_extw2, _w2);
v4i32 _w2h = (v4i32)__msa_ilvl_h(_extw2, _w2);
v4i32 _w3l = (v4i32)__msa_ilvr_h(_extw3, _w3);
v4i32 _w3h = (v4i32)__msa_ilvl_h(_extw3, _w3);
// Inputs: broadcast each of the 8 channel values per tile.
v4i32 _val0_0 = __msa_fill_w(r0[0]);
v4i32 _val0_1 = __msa_fill_w(r0[1]);
v4i32 _val0_2 = __msa_fill_w(r0[2]);
v4i32 _val0_3 = __msa_fill_w(r0[3]);
v4i32 _val0_4 = __msa_fill_w(r0[4]);
v4i32 _val0_5 = __msa_fill_w(r0[5]);
v4i32 _val0_6 = __msa_fill_w(r0[6]);
v4i32 _val0_7 = __msa_fill_w(r0[7]);
v4i32 _val1_0 = __msa_fill_w(r0[8]);
v4i32 _val1_1 = __msa_fill_w(r0[9]);
v4i32 _val1_2 = __msa_fill_w(r0[10]);
v4i32 _val1_3 = __msa_fill_w(r0[11]);
v4i32 _val1_4 = __msa_fill_w(r0[12]);
v4i32 _val1_5 = __msa_fill_w(r0[13]);
v4i32 _val1_6 = __msa_fill_w(r0[14]);
v4i32 _val1_7 = __msa_fill_w(r0[15]);
_sum0 = __msa_maddv_w(_sum0, _w0l, _val0_0);
_sum1 = __msa_maddv_w(_sum1, _w0h, _val0_1);
_sum2 = __msa_maddv_w(_sum2, _w0l, _val1_0);
_sum3 = __msa_maddv_w(_sum3, _w0h, _val1_1);
_sum0 = __msa_maddv_w(_sum0, _w1l, _val0_2);
_sum1 = __msa_maddv_w(_sum1, _w1h, _val0_3);
_sum2 = __msa_maddv_w(_sum2, _w1l, _val1_2);
_sum3 = __msa_maddv_w(_sum3, _w1h, _val1_3);
_sum0 = __msa_maddv_w(_sum0, _w2l, _val0_4);
_sum1 = __msa_maddv_w(_sum1, _w2h, _val0_5);
_sum2 = __msa_maddv_w(_sum2, _w2l, _val1_4);
_sum3 = __msa_maddv_w(_sum3, _w2h, _val1_5);
_sum0 = __msa_maddv_w(_sum0, _w3l, _val0_6);
_sum1 = __msa_maddv_w(_sum1, _w3h, _val0_7);
_sum2 = __msa_maddv_w(_sum2, _w3l, _val1_6);
_sum3 = __msa_maddv_w(_sum3, _w3h, _val1_7);
r0 += 16;
k0 += 32;
}
_sum0 = __msa_addv_w(_sum0, _sum1);
_sum2 = __msa_addv_w(_sum2, _sum3);
__msa_st_w(_sum0, output0_tm, 0);
__msa_st_w(_sum2, output0_tm + 4, 0);
output0_tm += 8;
}
// Remainder: single tile (same scheme, half the accumulators).
for (; i < tiles; i++)
{
const short* r0 = bb2.row<const short>(i / 2 + i % 2);
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
v4i32 _sum0 = __msa_fill_w(0);
v4i32 _sum1 = __msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 32);
__builtin_prefetch(k0 + 128);
v8i16 _w0 = __msa_ld_h(k0, 0);
v8i16 _w1 = __msa_ld_h(k0 + 8, 0);
v8i16 _w2 = __msa_ld_h(k0 + 16, 0);
v8i16 _w3 = __msa_ld_h(k0 + 24, 0);
v8i16 _extw0 = __msa_clti_s_h(_w0, 0);
v8i16 _extw1 = __msa_clti_s_h(_w1, 0);
v8i16 _extw2 = __msa_clti_s_h(_w2, 0);
v8i16 _extw3 = __msa_clti_s_h(_w3, 0);
v4i32 _w0l = (v4i32)__msa_ilvr_h(_extw0, _w0);
v4i32 _w0h = (v4i32)__msa_ilvl_h(_extw0, _w0);
v4i32 _w1l = (v4i32)__msa_ilvr_h(_extw1, _w1);
v4i32 _w1h = (v4i32)__msa_ilvl_h(_extw1, _w1);
v4i32 _w2l = (v4i32)__msa_ilvr_h(_extw2, _w2);
v4i32 _w2h = (v4i32)__msa_ilvl_h(_extw2, _w2);
v4i32 _w3l = (v4i32)__msa_ilvr_h(_extw3, _w3);
v4i32 _w3h = (v4i32)__msa_ilvl_h(_extw3, _w3);
v4i32 _val0 = __msa_fill_w(r0[0]);
v4i32 _val1 = __msa_fill_w(r0[1]);
v4i32 _val2 = __msa_fill_w(r0[2]);
v4i32 _val3 = __msa_fill_w(r0[3]);
v4i32 _val4 = __msa_fill_w(r0[4]);
v4i32 _val5 = __msa_fill_w(r0[5]);
v4i32 _val6 = __msa_fill_w(r0[6]);
v4i32 _val7 = __msa_fill_w(r0[7]);
_sum0 = __msa_maddv_w(_sum0, _w0l, _val0);
_sum1 = __msa_maddv_w(_sum1, _w0h, _val1);
_sum0 = __msa_maddv_w(_sum0, _w1l, _val2);
_sum1 = __msa_maddv_w(_sum1, _w1h, _val3);
_sum0 = __msa_maddv_w(_sum0, _w2l, _val4);
_sum1 = __msa_maddv_w(_sum1, _w2h, _val5);
_sum0 = __msa_maddv_w(_sum0, _w3l, _val6);
_sum1 = __msa_maddv_w(_sum1, _w3h, _val7);
r0 += 8;
k0 += 32;
}
_sum0 = __msa_addv_w(_sum0, _sum1);
__msa_st_w(_sum0, output0_tm, 0);
output0_tm += 4;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 4u * 4, 4, opt.workspace_allocator);
}
{
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
int tmp[4][6][4];
// tile
for (int i = 0; i < outh / 4; i++)
{
for (int j = 0; j < outw / 4; j++)
{
// top_blob_tm.create(tiles, 36, outch, elemsize, elempack);
const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 4;
const int* output0_tm_1 = output0_tm_0 + tiles * 4;
const int* output0_tm_2 = output0_tm_0 + tiles * 8;
const int* output0_tm_3 = output0_tm_0 + tiles * 12;
const int* output0_tm_4 = output0_tm_0 + tiles * 16;
const int* output0_tm_5 = output0_tm_0 + tiles * 20;
int* output0 = out0.row<int>(i * 4) + (j * 4) * 4;
// Column pass over the first 5 coefficient rows (no extra scale).
for (int m = 0; m < 5; m++)
{
v4i32 _out0tm0 = __msa_ld_w(output0_tm_0, 0);
v4i32 _out0tm1 = __msa_ld_w(output0_tm_1, 0);
v4i32 _out0tm2 = __msa_ld_w(output0_tm_2, 0);
v4i32 _out0tm3 = __msa_ld_w(output0_tm_3, 0);
v4i32 _out0tm4 = __msa_ld_w(output0_tm_4, 0);
v4i32 _out0tm5 = __msa_ld_w(output0_tm_5, 0);
v4i32 _tmp02a = __msa_addv_w(_out0tm1, _out0tm2);
v4i32 _tmp13a = __msa_subv_w(_out0tm1, _out0tm2);
v4i32 _tmp02b = __msa_addv_w(_out0tm3, _out0tm4);
v4i32 _tmp13b = __msa_subv_w(_out0tm3, _out0tm4);
v4i32 _tmp0m = __msa_addv_w(__msa_addv_w(_out0tm0, _tmp02a), _tmp02b);
v4i32 _tmp1m = __msa_addv_w(_tmp13a, __msa_slli_w(_tmp13b, 1));
v4i32 _tmp2m = __msa_addv_w(_tmp02a, __msa_slli_w(_tmp02b, 2));
v4i32 _tmp3m = __msa_addv_w(__msa_addv_w(_tmp13a, __msa_slli_w(_out0tm5, 2)), __msa_slli_w(_tmp13b, 3));
__msa_st_w(_tmp0m, tmp[0][m], 0);
__msa_st_w(_tmp1m, tmp[1][m], 0);
__msa_st_w(_tmp2m, tmp[2][m], 0);
__msa_st_w(_tmp3m, tmp[3][m], 0);
output0_tm_0 += tiles * 24;
output0_tm_1 += tiles * 24;
output0_tm_2 += tiles * 24;
output0_tm_3 += tiles * 24;
output0_tm_4 += tiles * 24;
output0_tm_5 += tiles * 24;
}
// Last coefficient row: same transform, then scaled by 4 (<< 2)
// before the final /576 normalization below.
for (int m = 5; m < 6; m++)
{
v4i32 _out0tm0 = __msa_ld_w(output0_tm_0, 0);
v4i32 _out0tm1 = __msa_ld_w(output0_tm_1, 0);
v4i32 _out0tm2 = __msa_ld_w(output0_tm_2, 0);
v4i32 _out0tm3 = __msa_ld_w(output0_tm_3, 0);
v4i32 _out0tm4 = __msa_ld_w(output0_tm_4, 0);
v4i32 _out0tm5 = __msa_ld_w(output0_tm_5, 0);
v4i32 _tmp02a = __msa_addv_w(_out0tm1, _out0tm2);
v4i32 _tmp13a = __msa_subv_w(_out0tm1, _out0tm2);
v4i32 _tmp02b = __msa_addv_w(_out0tm3, _out0tm4);
v4i32 _tmp13b = __msa_subv_w(_out0tm3, _out0tm4);
v4i32 _tmp0m = __msa_addv_w(__msa_addv_w(_out0tm0, _tmp02a), _tmp02b);
v4i32 _tmp1m = __msa_addv_w(_tmp13a, __msa_slli_w(_tmp13b, 1));
v4i32 _tmp2m = __msa_addv_w(_tmp02a, __msa_slli_w(_tmp02b, 2));
v4i32 _tmp3m = __msa_addv_w(__msa_addv_w(_tmp13a, __msa_slli_w(_out0tm5, 2)), __msa_slli_w(_tmp13b, 3));
_tmp0m = __msa_slli_w(_tmp0m, 2);
_tmp1m = __msa_slli_w(_tmp1m, 2);
_tmp2m = __msa_slli_w(_tmp2m, 2);
_tmp3m = __msa_slli_w(_tmp3m, 2);
__msa_st_w(_tmp0m, tmp[0][m], 0);
__msa_st_w(_tmp1m, tmp[1][m], 0);
__msa_st_w(_tmp2m, tmp[2][m], 0);
__msa_st_w(_tmp3m, tmp[3][m], 0);
output0_tm_0 += tiles * 24;
output0_tm_1 += tiles * 24;
output0_tm_2 += tiles * 24;
output0_tm_3 += tiles * 24;
output0_tm_4 += tiles * 24;
output0_tm_5 += tiles * 24;
}
// Row pass producing the 4x4 output block, normalized by 576
// (the accumulated Winograd transform scale factor).
for (int m = 0; m < 4; m++)
{
v4i32 _tmp00 = __msa_ld_w(tmp[m][0], 0);
v4i32 _tmp01 = __msa_ld_w(tmp[m][1], 0);
v4i32 _tmp02 = __msa_ld_w(tmp[m][2], 0);
v4i32 _tmp03 = __msa_ld_w(tmp[m][3], 0);
v4i32 _tmp04 = __msa_ld_w(tmp[m][4], 0);
v4i32 _tmp05 = __msa_ld_w(tmp[m][5], 0);
v4i32 _tmp02a = __msa_addv_w(_tmp01, _tmp02);
v4i32 _tmp13a = __msa_subv_w(_tmp01, _tmp02);
v4i32 _tmp02b = __msa_addv_w(_tmp03, _tmp04);
v4i32 _tmp13b = __msa_subv_w(_tmp03, _tmp04);
v4i32 _out00 = __msa_addv_w(__msa_addv_w(_tmp00, _tmp02a), _tmp02b);
v4i32 _out01 = __msa_addv_w(_tmp13a, __msa_slli_w(_tmp13b, 1));
v4i32 _out02 = __msa_addv_w(_tmp02a, __msa_slli_w(_tmp02b, 2));
v4i32 _out03 = __msa_addv_w(__msa_addv_w(_tmp05, _tmp13a), __msa_slli_w(_tmp13b, 3));
// TODO use integer trick for division by 576
v4f32 _v576 = __msa_fill_w_f32(1.0 / 576);
_out00 = __msa_ftint_s_w(__msa_fmul_w(__msa_ffint_s_w(_out00), _v576));
_out01 = __msa_ftint_s_w(__msa_fmul_w(__msa_ffint_s_w(_out01), _v576));
_out02 = __msa_ftint_s_w(__msa_fmul_w(__msa_ffint_s_w(_out02), _v576));
_out03 = __msa_ftint_s_w(__msa_fmul_w(__msa_ffint_s_w(_out03), _v576));
__msa_st_w(_out00, output0, 0);
__msa_st_w(_out01, output0 + 4, 0);
__msa_st_w(_out02, output0 + 8, 0);
__msa_st_w(_out03, output0 + 12, 0);
output0 += outw * 4;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
DataGen.h | // Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#pragma once
#include <boost/algorithm/string/predicate.hpp>
#include <cstring>
#include <memory>
#include <random>
#include <knowhere/index/vector_index/VecIndex.h>
#include <knowhere/index/vector_index/adapter/VectorAdapter.h>
#include <knowhere/index/vector_index/VecIndexFactory.h>
#include <knowhere/index/vector_index/IndexIVF.h>
#include "Constants.h"
#include "common/Schema.h"
#include "query/SearchOnIndex.h"
#include "segcore/SegmentGrowingImpl.h"
#include "segcore/SegmentSealedImpl.h"
using boost::algorithm::starts_with;
namespace milvus::segcore {
// Column-major synthetic test data produced by DataGen(): one aligned byte
// buffer per schema field (cols_), derived row ids and timestamps, and a
// row-major copy (rows_ / raw_) built by generate_rows().
struct GeneratedData {
std::vector<uint8_t> rows_;
std::vector<aligned_vector<uint8_t>> cols_;
std::vector<idx_t> row_ids_;
std::vector<Timestamp> timestamps_;
RowBasedRawData raw_;
// Copy field `index` out as a typed vector (byte-wise reinterpretation
// of the column buffer; caller chooses T to match the field type).
template <typename T>
auto
get_col(int index) const {
auto& target = cols_.at(index);
std::vector<T> ret(target.size() / sizeof(T));
memcpy(ret.data(), target.data(), target.size());
return ret;
}
// Borrow a mutable typed pointer into field `index`'s storage; the
// pointer is invalidated if cols_ is reallocated or destroyed.
template <typename T>
auto
get_mutable_col(int index) {
auto& target = cols_.at(index);
assert(target.size() == row_ids_.size() * sizeof(T));
auto ptr = reinterpret_cast<T*>(target.data());
return ptr;
}
private:
// Construction is restricted to the DataGen() factory (friend below).
GeneratedData() = default;
friend GeneratedData
DataGen(SchemaPtr schema, int64_t N, uint64_t seed, uint64_t ts_offset);
void
generate_rows(int64_t N, SchemaPtr schema);
};
inline void
GeneratedData::generate_rows(int64_t N, SchemaPtr schema) {
    // Flatten the per-field (column-based) buffers in cols_ into a single
    // row-major blob (rows_) and expose it through raw_.
    //
    // offset_infos[f] is the byte offset of field f inside one packed row;
    // the trailing entry equals the full row width.
    std::vector<int> offset_infos(schema->size() + 1, 0);
    auto sizeof_infos = schema->get_sizeof_infos();
    std::partial_sum(sizeof_infos.begin(), sizeof_infos.end(), offset_infos.begin() + 1);
    int64_t len_per_row = offset_infos.back();
    assert(len_per_row == schema->get_total_sizeof());
    // change column-based data to row-based data
    // int64_t / size_t indices: the original used `int`, which sign-compares
    // against schema->size() (size_t) and overflows for N > INT_MAX.
    std::vector<uint8_t> result(len_per_row * N);
    for (int64_t index = 0; index < N; ++index) {
        for (size_t fid = 0; fid < schema->size(); ++fid) {
            auto len = sizeof_infos[fid];
            auto offset = offset_infos[fid];
            auto src = cols_[fid].data() + index * len;
            auto dst = result.data() + index * len_per_row + offset;
            memcpy(dst, src, len);
        }
    }
    rows_ = std::move(result);
    raw_.raw_data = rows_.data();
    raw_.sizeof_per_row = schema->get_total_sizeof();
    raw_.count = N;
}
// Generate N rows of synthetic data matching `schema`. Deterministic for a
// fixed seed. Vector fields named "normalized*" are L2-normalized; int64
// fields named "counter*" receive 0..N-1. Row ids are 0..N-1 and timestamps
// are i + ts_offset.
inline GeneratedData
DataGen(SchemaPtr schema, int64_t N, uint64_t seed = 42, uint64_t ts_offset = 0) {
using std::vector;
std::vector<aligned_vector<uint8_t>> cols;
std::default_random_engine er(seed);
std::normal_distribution<> distr(0, 1);
int offset = 0;
// Copy any vector<T> into an aligned byte column and append it to cols.
auto insert_cols = [&cols](auto& data) {
using T = std::remove_reference_t<decltype(data)>;
auto len = sizeof(typename T::value_type) * data.size();
auto ptr = aligned_vector<uint8_t>(len);
memcpy(ptr.data(), data.data(), len);
cols.emplace_back(std::move(ptr));
};
for (auto& field : schema->get_fields()) {
switch (field.get_data_type()) {
case engine::DataType::VECTOR_FLOAT: {
auto dim = field.get_dim();
vector<float> final(dim * N);
bool is_ip = starts_with(field.get_name().get(), "normalized");
// Each row uses its own engine seeded with (seed + n) so the
// parallel loop is deterministic regardless of scheduling.
#pragma omp parallel for
for (int n = 0; n < N; ++n) {
vector<float> data(dim);
float sum = 0;
std::default_random_engine er2(seed + n);
std::normal_distribution<> distr2(0, 1);
for (auto& x : data) {
x = distr2(er2) + offset;
sum += x * x;
}
if (is_ip) {
sum = sqrt(sum);
for (auto& x : data) {
x /= sum;
}
}
std::copy(data.begin(), data.end(), final.begin() + dim * n);
}
insert_cols(final);
break;
}
case engine::DataType::VECTOR_BINARY: {
auto dim = field.get_dim();
Assert(dim % 8 == 0);
vector<uint8_t> data(dim / 8 * N);
for (auto& x : data) {
x = er();
}
insert_cols(data);
break;
}
case engine::DataType::INT64: {
vector<int64_t> data(N);
// begin with counter
if (starts_with(field.get_name().get(), "counter")) {
int64_t index = 0;
for (auto& x : data) {
x = index++;
}
} else {
int i = 0;
for (auto& x : data) {
// NOTE(review): the random value is immediately overwritten by
// x = i, but the er() call still advances the shared engine and
// therefore affects every later field. Presumably a deliberate
// change to sequential values that kept the RNG stream stable --
// confirm before "cleaning up" either line.
x = er() % (2 * N);
x = i;
i++;
}
}
insert_cols(data);
break;
}
case engine::DataType::INT32: {
vector<int> data(N);
for (auto& x : data) {
x = er() % (2 * N);
}
insert_cols(data);
break;
}
case engine::DataType::INT16: {
vector<int16_t> data(N);
for (auto& x : data) {
x = er() % (2 * N);
}
insert_cols(data);
break;
}
case engine::DataType::INT8: {
vector<int8_t> data(N);
for (auto& x : data) {
x = er() % (2 * N);
}
insert_cols(data);
break;
}
case engine::DataType::FLOAT: {
vector<float> data(N);
for (auto& x : data) {
x = distr(er);
}
insert_cols(data);
break;
}
case engine::DataType::DOUBLE: {
vector<double> data(N);
for (auto& x : data) {
x = distr(er);
}
insert_cols(data);
break;
}
default: {
throw std::runtime_error("unimplemented");
}
}
++offset;
}
GeneratedData res;
res.cols_ = std::move(cols);
for (int i = 0; i < N; ++i) {
res.row_ids_.push_back(i);
res.timestamps_.push_back(i + ts_offset);
}
// std::shuffle(res.row_ids_.begin(), res.row_ids_.end(), er);
res.generate_rows(N, schema);
return res;
}
inline auto
CreatePlaceholderGroup(int64_t num_queries, int dim, int64_t seed = 42) {
    // Build a PlaceholderGroup ("$0", FloatVector) with `num_queries`
    // random vectors of dimension `dim`, drawn from N(0, 1) seeded by `seed`.
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto value = raw_group.add_placeholders();
    value->set_tag("$0");
    value->set_type(ser::PlaceholderType::FloatVector);
    std::normal_distribution<double> dis(0, 1);
    std::default_random_engine e(seed);
    for (int q = 0; q < num_queries; ++q) {
        std::vector<float> query(dim);
        for (auto& elem : query) {
            elem = dis(e);
        }
        value->add_values(query.data(), query.size() * sizeof(float));
    }
    return raw_group;
}
inline auto
CreatePlaceholderGroupFromBlob(int64_t num_queries, int dim, const float* src) {
    // Wrap `num_queries` consecutive dim-sized float vectors from `src`
    // into a PlaceholderGroup ("$0", FloatVector).
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto value = raw_group.add_placeholders();
    value->set_tag("$0");
    value->set_type(ser::PlaceholderType::FloatVector);
    const float* cursor = src;
    for (int q = 0; q < num_queries; ++q) {
        std::vector<float> query(cursor, cursor + dim);
        cursor += dim;
        value->add_values(query.data(), query.size() * sizeof(float));
    }
    return raw_group;
}
inline auto
CreateBinaryPlaceholderGroup(int64_t num_queries, int64_t dim, int64_t seed = 42) {
    // `num_queries` random binary vectors; `dim` is in bits (must be a
    // multiple of 8), so each vector occupies dim / 8 bytes.
    assert(dim % 8 == 0);
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto value = raw_group.add_placeholders();
    value->set_tag("$0");
    value->set_type(ser::PlaceholderType::BinaryVector);
    std::default_random_engine e(seed);
    const int64_t num_bytes = dim / 8;
    for (int64_t q = 0; q < num_queries; ++q) {
        std::vector<uint8_t> query(num_bytes);
        for (auto& byte : query) {
            byte = e();
        }
        value->add_values(query.data(), query.size());
    }
    return raw_group;
}
inline auto
CreateBinaryPlaceholderGroupFromBlob(int64_t num_queries, int64_t dim, const uint8_t* ptr) {
    // Wrap `num_queries` consecutive binary vectors (dim bits = dim / 8
    // bytes each) from `ptr` into a PlaceholderGroup ("$0", BinaryVector).
    assert(dim % 8 == 0);
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto value = raw_group.add_placeholders();
    value->set_tag("$0");
    value->set_type(ser::PlaceholderType::BinaryVector);
    const int64_t num_bytes = dim / 8;
    for (int64_t q = 0; q < num_queries; ++q) {
        std::vector<uint8_t> query(ptr, ptr + num_bytes);
        ptr += num_bytes;
        value->add_values(query.data(), query.size());
    }
    return raw_group;
}
inline json
SearchResultToJson(const SearchResult& sr) {
    // Render a SearchResult as JSON: one list per query, each entry formatted
    // "<id>-><distance>". ids_ / distances_ are laid out query-major, i.e.
    // element [q * topk + k].
    int64_t num_queries = sr.num_queries_;
    int64_t topk = sr.topk_;
    std::vector<std::vector<std::string>> results;
    results.reserve(num_queries);
    // int64_t indices (the original used int, which truncates for large
    // num_queries * topk products).
    for (int64_t q = 0; q < num_queries; ++q) {
        std::vector<std::string> result;
        result.reserve(topk);
        for (int64_t k = 0; k < topk; ++k) {
            int64_t index = q * topk + k;
            result.emplace_back(std::to_string(sr.ids_[index]) + "->" + std::to_string(sr.distances_[index]));
        }
        results.emplace_back(std::move(result));
    }
    return json{results};
}
// NOTE: the stray ';' that followed this function body (an empty
// declaration at namespace scope, rejected under -pedantic) was removed.
// Bulk-load a GeneratedData set into a sealed segment: the two system
// columns first (field id 0 = RowId, field id 1 = Timestamp), then each
// schema field's column in schema order.
inline void
SealedLoader(const GeneratedData& dataset, SegmentSealed& seg) {
// TODO
auto row_count = dataset.row_ids_.size();
{
LoadFieldDataInfo info;
info.blob = dataset.row_ids_.data();
info.row_count = dataset.row_ids_.size();
info.field_id = 0; // field id for RowId
info (see note); seg.LoadFieldData(info);
}
{
LoadFieldDataInfo info;
info.blob = dataset.timestamps_.data();
info.row_count = dataset.timestamps_.size();
info.field_id = 1;
seg.LoadFieldData(info);
}
// cols_ is indexed by schema position; field_id comes from the field meta.
int field_offset = 0;
for (auto& meta : seg.get_schema().get_fields()) {
LoadFieldDataInfo info;
info.field_id = meta.get_id().get();
info.row_count = row_count;
info.blob = dataset.cols_[field_offset].data();
seg.LoadFieldData(info);
++field_offset;
}
}
inline std::unique_ptr<SegmentSealed>
SealedCreator(SchemaPtr schema, const GeneratedData& dataset, const LoadIndexInfo& index_info) {
    // Convenience factory: create a sealed segment, bulk-load the generated
    // columns, then attach the prebuilt vector index.
    auto sealed_segment = CreateSealedSegment(schema);
    SealedLoader(dataset, *sealed_segment);
    sealed_segment->LoadIndex(index_info);
    return sealed_segment;
}
// Build a knowhere IVF index (nlist = 1024, L2 metric) over N float vectors
// of dimension `dim`, training and adding the same dataset. Vectors are
// added without explicit ids (AddWithoutIds), so ids are implicit 0..N-1.
inline knowhere::VecIndexPtr
GenIndexing(int64_t N, int64_t dim, const float* vec) {
// {knowhere::IndexParams::nprobe, 10},
auto conf = knowhere::Config{{knowhere::meta::DIM, dim},
{knowhere::IndexParams::nlist, 1024},
{knowhere::Metric::TYPE, knowhere::Metric::L2},
{knowhere::meta::DEVICEID, 0}};
auto database = knowhere::GenDataset(N, dim, vec);
auto indexing = std::make_shared<knowhere::IVF>();
indexing->Train(database, conf);
indexing->AddWithoutIds(database, conf);
return indexing;
}
} // namespace milvus::segcore
|
schedule.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
 * Sum a[0..n-1] with an OpenMP static-schedule worksharing loop: each
 * thread accumulates a private partial sum, then folds it into the shared
 * total with an atomic update. n is read from argv[1], capped at MAX_N.
 */
int main(int argc, char **argv) {
    enum { MAX_N = 20 };           /* was a magic 20; a[] really is fixed-size */
    int i, n = 20, a[MAX_N], suma = 0;
    if (argc < 2) {
        fprintf(stderr, "\nFalta iteraciones\n");
        exit(-1);
    }
    n = atoi(argv[1]);
    if (n > MAX_N) n = MAX_N;
    for (i = 0; i < n; i++)
        a[i] = i;
#pragma omp parallel
    {
        int sumalocal = 0;         /* per-thread partial sum */
#pragma omp for schedule(static)
        for (i = 0; i < n; i++) {  /* loop variable is implicitly private */
            sumalocal += a[i];
            printf("thread %d suma de a[%d]=%d sumalocal=%d\n", omp_get_thread_num(), i, a[i], sumalocal);
        }
#pragma omp atomic
        suma += sumalocal;
    }
    printf("Fuera de 'parallel' suma=%d\n", suma);
    return 0;                      /* was missing; main also had implicit int */
}
DRB025-simdtruedep-var-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it andor
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http:www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISOIEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https:github.comLLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This one has race condition due to true dependence.
But data races happen at instruction level, not thread level.
Data race pair: a[i+1]@68:5 vs. a[i]@68:12
*/
#include <stdio.h>  /* printf -- previously missing: the call below relied on an implicit declaration */
#include <stdlib.h>
int main(int argc, char * argv[])
{
/* DataRaceBench kernel DRB025 (Cetus-translated). Loop main#1 carries a
   true dependence -- a[i+1] reads the a[i] written by the previous
   iteration -- so it is intentionally left serial here; the race described
   in the file header occurs at instruction level when it is vectorized.
   Do not parallelize it. */
int i;
int len = 100;
/* NOTE(review): len comes straight from argv[1] without validation; a
   non-positive or huge value makes the VLAs below invalid -- presumably
   acceptable for a benchmark harness, but confirm. */
int a[len], b[len];
int _ret_val_0;
if (argc>1)
{
len=atoi(argv[1]);
}
/* Initialization loop: independent iterations, safe to parallelize. */
#pragma cetus private(i)
#pragma loop name main#0
#pragma cetus parallel
#pragma omp parallel for private(i)
for (i=0; i<len; i ++ )
{
a[i]=i;
b[i]=(i+1);
}
/* Loop-carried true dependence: a[i+1] depends on a[i]. */
#pragma cetus private(i)
#pragma loop name main#1
for (i=0; i<(len-1); i ++ )
{
a[i+1]=(a[i]*b[i]);
}
/* Print results (printf requires <stdio.h>). */
#pragma cetus private(i)
#pragma loop name main#2
for (i=0; i<len; i ++ )
{
printf("%d %d\n", a[i], b[i]);
}
_ret_val_0=0;
return _ret_val_0;
}
|
Util.h | //
// Created by Bangtian Liu on 6/28/19.
//
#ifndef PROJECT_UTIL_H
#define PROJECT_UTIL_H
#include <string>
#include <iostream>
#include <fstream>
#include <sstream>
#include <random>
#include <mkl.h>
#include <cstring>
//#include "../sympiler/nUtil.h"
using namespace std;
// Kernel-function families selectable by Fsubmatrix(); only KS_GAUSSIAN,
// KS_LOG, KS_EXPONENTIAL and KS_NEWTON have implementations there.
typedef enum{
KS_GAUSSIAN,
KS_POLYNOMIAL,
KS_LAPLACE,
KS_GAUSSIAN_VAR_BANDWIDTH,
KS_TANH,
KS_QUARTIC,
KS_MULTIQUADRATIC,
KS_EPANECHNIKOV,
KS_LOG,
KS_EXPONENTIAL,
KS_NEWTON
}Ktype;
int preprocesbin(std::string name)
{
ifstream file(name.data(), std::ios::in|std::ios::binary|std::ios::ate);
auto size = file.tellg();
return (int)size/sizeof(double);
}
int preprocesoffset(std::string name)
{
ifstream file(name.data(), std::ios::in|std::ios::binary|std::ios::ate);
auto size = file.tellg();
return (int)size/sizeof(int);
}
int preprocesLoffset(std::string name)
{
ifstream file(name.data(), std::ios::in|std::ios::binary|std::ios::ate);
auto size = file.tellg();
return (int)size/sizeof(uint64_t);
}
void bin2read(std::string name, double *mat, int len)
{
ifstream in(name.data(), ios::in | ios::binary|std::ios::ate);
in.seekg( 0, std::ios::beg );
in.read( (char*)mat, len*sizeof(double));
in.close();
}
void bin2read(std::string name, int *mat, int len)
{
ifstream in(name.data(), ios::in | ios::binary|std::ios::ate);
in.seekg( 0, std::ios::beg );
in.read( (char*)mat, len*sizeof(int));
in.close();
}
int preprocesstxt(std::string name)
{
ifstream in(name.data());
string line;
int len=0;
while(getline(in, line)){
++len;
}
return len;
}
void txt2read(std::string name, int *offset)
{
int index=0;
ifstream in(name.data());
string line;
while(getline(in,line)){
istringstream liness(line);
liness >> offset[index];
++index;
}
}
void pairtxt2read(std::string name, int *nodex, int *nodey)
{
int index = 0;
ifstream in(name.data());
string line;
int i, j;
while(getline(in,line)){
istringstream liness(line);
liness >> i >> j;
// printf("pair (%d, %d)\n",i,j);
nodex[index]=i;
nodey[index]=j;
++index;
}
in.close();
}
int processlevelsets(std::string name, int &len)
{
ifstream in(name.data());
string line;
int level, idx;
int depth=0;
while(getline(in, line))
{
istringstream liness(line);
liness >> level >> idx ;
depth = std::max(depth,level);
++len;
}
return depth;
}
// Rebuild a CSR-style level-set structure from "<level> <node>" pairs:
// idx[] receives the `len` node ids in file order, and levelset[l] becomes
// the start offset of level l (levelset needs n + 1 entries).
// NOTE(review): assumes the pairs are sorted by level, levels start at 1
// and increase by exactly 1 at each transition -- verify against whatever
// writes these files.
void readlevelset(std::string name, int *levelset, int *idx, int len, int n)
{
ifstream in(name.data());
string line;
int l, x;
int colCnt=0, nnz=0;
levelset[0]=0;
for(int i=1; nnz<len;)
{
in >> l;
in >> x;
if(l==i){
idx[nnz] = x;
colCnt++;
nnz++;
}
else {
// Level changed: close level i, then start level i+1 with this entry.
levelset[i]=levelset[i-1] + colCnt;
i++;
colCnt=1;
idx[nnz] = x;
nnz ++;
}
}
// Close the final level.
levelset[n] = levelset[n-1] + colCnt;
}
// Fill the nrow x ncol column-major `array` with uniform draws from [a, b).
// The engine is default-seeded, so repeated calls with the same shape and
// bounds reproduce the same values.
void randu(int nrow, int ncol, double * array, double a, double b)
{
    std::default_random_engine engine;
    std::uniform_real_distribution<double> uniform(a, b);
    //#pragma omp parallel for
    for (int col = 0; col < ncol; ++col) {
        double *column = array + (size_t)col * nrow;
        for (int row = 0; row < nrow; ++row) {
            column[row] = uniform(engine);
        }
    }
}
// Fills `submatrix` with kernel evaluations K(X[amap[i]], X[bmap[j]]):
// the result is amap.size() x bmap.size(), stored column-major.  X holds
// the d-dimensional points row-major (point index * d + coordinate).
// `h` is the Gaussian bandwidth; the other kernel types ignore it.
void Fsubmatrix(std::vector<int> &amap, std::vector<int> &bmap, double *submatrix, Ktype ktype, double *X,
                int d, double h
)
{
    switch (ktype) {
    case KS_GAUSSIAN: {
        // K = exp(-||x - y||^2 / (2 h^2)); columns are independent, so the
        // outer loop is parallelized and the distance loop vectorized.
#pragma omp parallel for
        for (int j = 0; j < bmap.size(); ++j) {
            for (int i = 0; i < amap.size(); ++i) {
                auto Kij = 0.0;
#pragma omp simd reduction(+:Kij)
                for (int k = 0; k < d; ++k) {
                    auto col = bmap[j];
                    auto row = amap[i];
                    auto tar = X[col * d + k];
                    auto src = X[row * d + k];
                    Kij += (tar - src) * (tar - src);
                }
                Kij = exp(-Kij / (2 * h * h));
                submatrix[j * amap.size() + i] = Kij;
            }
        }
        break;
    }
    case KS_LOG: {
        // K = -0.5 * log(||x - y||^2); diagonal entries (zero distance,
        // where log is undefined) are forced to 1.
        for (int j = 0; j < bmap.size(); j++) {
            for (int i = 0; i < amap.size(); i++) {
                auto Kij = 0.0;
                for (int k = 0; k < d; ++k) {
                    auto col = bmap[j];
                    auto row = amap[i];
                    auto tar = X[col * d + k];
                    auto src = X[row * d + k];
                    Kij += (tar - src) * (tar - src);
                }
                submatrix[j * amap.size() + i] = -0.5 * log(Kij);
                if (amap[i] == bmap[j])submatrix[j * amap.size() + i] = 1.0;
            }
        }
        break;
    }
    case KS_EXPONENTIAL: {
        // K = exp(-||x - y||)
        for (int j = 0; j < bmap.size(); j++) {
            for (int i = 0; i < amap.size(); i++) {
                auto Kij = 0.0;
                for (int k = 0; k < d; ++k) {
                    auto col = bmap[j];
                    auto row = amap[i];
                    auto tar = X[col * d + k];
                    auto src = X[row * d + k];
                    Kij += (tar - src) * (tar - src);
                }
                submatrix[j * amap.size() + i] = exp(-sqrt(Kij));
            }
        }
        break;
    }
    case KS_NEWTON: {
        // K = 1 / ||x - y||, with the singular zero-distance case mapped to 1.
        for (int j = 0; j < bmap.size(); j++) {
            for (int i = 0; i < amap.size(); i++) {
                auto Kij = 0.0;
                for (int k = 0; k < d; ++k) {
                    auto col = bmap[j];
                    auto row = amap[i];
                    auto tar = X[col * d + k];
                    auto src = X[row * d + k];
                    Kij += (tar - src) * (tar - src);
                }
                if(Kij==0)Kij=1;
                submatrix[j * amap.size() + i] = 1/sqrt(Kij);
            }
        }
        break;
    }
    default: {
        printf("invalid kernel type\n");
        exit(1);
        break;
    }
    }
}
double computeError(int *lids, int len, int nrhs, int n, double *X, int d, double *W, double *U)
{
// len=100;
int ntest=10;
// printf("len=%d\n", len);
auto amap = std::vector<int>(len);
auto bmap = std::vector<int>(n);
for(int i=0;i<len;i++)
{
amap[i] = lids[i];
// printf("idx=%d\n",amap[i]);
}
for(int i=0;i<n;i++)
{
bmap[i] = i;
}
auto Kab = (double *)mkl_malloc(sizeof(double)*len*n,64);
double *result = (double *)mkl_malloc(sizeof(double)*len*nrhs,64);
memset(result, 0, sizeof(double)*len*nrhs);
Ktype ktype = KS_GAUSSIAN;
Fsubmatrix(amap,bmap,Kab,ktype,X,d,5);
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
len,nrhs,n,1.0f,
Kab,len,
W, n, 0.0,
result,len
);
double error = 0.0f;
double nrm2 = 0.0f;
double averror = 0.0f;
for(int i=0; i<ntest; ++i)
{
error = 0.0f;
nrm2 = 0.0f;
for(int j=0; j<nrhs; ++j)
{
// printf("i=%d res=%f app=%f\n",i, result[j*len+i], U[j*len+i]);
error += (result[j*len+i]-U[j*len+i])*(result[j*len+i]-U[j*len+i]);
nrm2 += (result[j*len+i]*result[j*len+i]);
}
error = std::sqrt(error);
nrm2 = std::sqrt(nrm2);
averror += error/nrm2;
}
return averror/ntest;
}
// Prepends the sympiler data directory to a file name.
std::string sadd(std::string &str)
{
    return std::string("../sympiler/") + str;
}
// Prints an nrow-by-ncol column-major matrix to stdout, one row per line.
void PrintMatrix(double *mat, int nrow, int ncol, std::string name)
{
    std::cout << "Matirx: " << name << "\n";
    for (int r = 0; r < nrow; r++) {
        for (int c = 0; c < ncol; c++) {
            printf("%f\t", mat[c * nrow + r]);
        }
        printf("\n");
    }
}
#endif //PROJECT_UTIL_H
|
direct_method.c | #include "direct_method.h"
#include "IO.h"
#include "mpi.h"
#include "omp.h"
/* Here are the initialization of the global variables: */
bodies_t bodies;
char *Direct_data_file;
bool Direct_are_data_bzipped2 = FALSE;
position_t center;
COORDINATES_T half_side;
extern int nb_proc;
extern int my_rank;
FMB_Info_t FMB_Info;
/* Buffer des Pj et Fj pour les calculs distants */
COORDINATES_T *pj_pos_x;
COORDINATES_T *pj_pos_y;
COORDINATES_T *pj_pos_z;
COORDINATES_T *pj_fx;
COORDINATES_T *pj_fy;
COORDINATES_T *pj_fz;
/* L'ensemble des masses */
VALUES_T *p_allvalues;
/* pointeur vers les masses courantes */
VALUES_T *p_values;
/* See definition in 'FMB.c'. */
void bunzip2_file(const char *filename);
void bzip2_file(const char *filename);
/* Exchanges the three coordinate buffers AND the fourth (speed-vector)
 * buffer of two body sets through `tmp`.  The original macro accepted
 * v1/v2 but never swapped them, so the speed-vector staging buffer `spd`
 * was sent without ever receiving the freshly read data; this version
 * swaps all four pairs.  Wrapped in do { } while (0) so it expands to a
 * single statement. */
#define SWAP(x1, y1, z1, v1, x2, y2, z2, v2, tmp) \
  do { tmp=x1; x1=x2; x2=tmp; tmp=y1; y1=y2; y2=tmp; \
       tmp=z1; z1=z2; z2=tmp; tmp=v1; v1=v2; v2=tmp; } while (0)
/*********************************************************************************************
**********************************************************************************************
Direct_method_Init
**********************************************************************************************
*********************************************************************************************/
/* Initializes the direct method: checks that the global output stream is
 * set, caches the square of the softening parameter, and clears the
 * simulation-box center and half side length. */
void Direct_method_Init(){
  /* Checking: */
  if (f_output == NULL){
    FMB_error("'f_output' must be set.\n");
  }
  /* eps_soft_square = eps_soft^2, cached for later use: */
  fprintf(f_output, "Softening parameter: %.1e\n", FMB_Info.eps_soft);
  FMB_Info.eps_soft_square = FMB_Info.eps_soft * FMB_Info.eps_soft;
  /* Clear 'center' and 'half_side': */
  position_Initialize(&center);
  half_side = (COORDINATES_T) 0.0;
}
/*********************************************************************************************
**********************************************************************************************
Direct_method_Data
**********************************************************************************************
*********************************************************************************************/
/* Initialisation des buffers Pj */
/* Allocates and zeroes the six Pj staging buffers (remote positions and
 * the forces computed against them), one slot per body. */
void Direct_method_Pj_Initialize(bodies_ind_t nb_bodies)
{
  bodies_ind_t i;
  size_t bytes = nb_bodies * sizeof(COORDINATES_T);

  pj_pos_x = FMB_malloc_with_check(bytes);
  pj_pos_y = FMB_malloc_with_check(bytes);
  pj_pos_z = FMB_malloc_with_check(bytes);
  pj_fx = FMB_malloc_with_check(bytes);
  pj_fy = FMB_malloc_with_check(bytes);
  pj_fz = FMB_malloc_with_check(bytes);
#pragma omp parallel for schedule(static)
  for (i = 0; i < nb_bodies; ++i)
  {
    pj_pos_x[i] = 0;
    pj_pos_y[i] = 0;
    pj_pos_z[i] = 0;
    pj_fx[i] = 0;
    pj_fy[i] = 0;
    pj_fz[i] = 0;
  }
}
/* Free les Pj*/
/* Releases the six Pj staging buffers allocated by
 * Direct_method_Pj_Initialize(). */
void Direct_method_Pj_Terminate()
{
  FMB_free(pj_fz);
  FMB_free(pj_fy);
  FMB_free(pj_fx);
  FMB_free(pj_pos_z);
  FMB_free(pj_pos_y);
  FMB_free(pj_pos_x);
}
/* Send des données depuis un buffer d'envoi du processus 0 au processus p*/
/* Asynchronously sends the staged body positions (pj_pos_*) and the packed
 * speed vectors (spd) to process p.  Fills req[0..3] with the four send
 * requests; the caller must complete them before reusing the buffers.
 * Tags 1/2/3 carry x/y/z positions, tag 5 the speed vectors (3 components
 * per body, hence the *3 element count). */
void Direct_method_Data_Send(int p, position_t *spd, MPI_Request *req)
{
  MPI_Isend(pj_pos_x, bodies.nb_bodies, MY_MPI_F, p, 1, MPI_COMM_WORLD, req);
  MPI_Isend(pj_pos_y, bodies.nb_bodies, MY_MPI_F, p, 2, MPI_COMM_WORLD, req+1);
  MPI_Isend(pj_pos_z, bodies.nb_bodies, MY_MPI_F, p, 3, MPI_COMM_WORLD, req+2);
  MPI_Isend(spd, bodies.nb_bodies * 3, MY_MPI_F, p, 5, MPI_COMM_WORLD, req+3);
}
/* Initialisation des corps et reception des données */
/* utilisé par les processus autres que 0 */
void Direct_method_Data_InitRecv()
{
bodies_ind_t nb_bodies;
bodies_ind_t nb_bodies_total;
MPI_Request req[4];
/* Recuperer le nombre de corps local */
MPI_Bcast(&nb_bodies, 1, MPI_LONG, 0, MPI_COMM_WORLD);
/* retrouver le nombre de corps total et allouer les masses */
nb_bodies_total = nb_bodies * nb_proc;
p_allvalues = FMB_malloc_with_check(nb_bodies_total * sizeof(VALUES_T));
/* Allocation et initialisation des corps */
bodies_Initialize(&bodies, nb_bodies);
bodies.nb_bodies = nb_bodies;
bodies.size_allocated = nb_bodies;
bodies.p_values = p_allvalues + nb_bodies * (my_rank - 1);
/* On place le pointeur des masses */
p_values = bodies.p_values + nb_bodies;
/* Reception des corps (position et vitesses) */
MPI_Irecv(bodies.p_pos_x, nb_bodies, MY_MPI_F, 0, 1, MPI_COMM_WORLD, req);
MPI_Irecv(bodies.p_pos_y, nb_bodies, MY_MPI_F, 0, 2, MPI_COMM_WORLD, req+1);
MPI_Irecv(bodies.p_pos_z, nb_bodies, MY_MPI_F, 0, 3, MPI_COMM_WORLD, req+2);
MPI_Irecv(bodies.p_speed_vectors, nb_bodies * 3, MY_MPI_F, 0, 5, MPI_COMM_WORLD, req+3); /* Utiliser un type MPI_STRUCT si machines non homogènes */
bodies_ClearFP(&bodies);
/* Recuperation des masses */
MPI_Bcast(p_allvalues, nb_bodies_total, MY_MPI_F, 0, MPI_COMM_WORLD);
/* Initialisation des buffers Pj */
Direct_method_Pj_Initialize(nb_bodies);
/* On termine la reception */
MPI_Wait(req, NULL);
MPI_Wait(req+1, NULL);
MPI_Wait(req+2, NULL);
MPI_Wait(req+3, NULL);
}
/* Lecture et envoie des données */
/* Appelé uniquement par le processus 0 */
void Direct_method_Data(char *data_file){
bodies_ind_t k;
bodies_ind_t nb_bodies;
int i;
VALUES_T *current_values;
void *swap;
position_t *spd;
MPI_Request id[4];
if (INFO_DISPLAY(2)){
fprintf(f_output, "Opening data file \'%s\' for direct computation... \n", data_file);
}
/* Initialize Input operations: */
FMB_IO_InitI(data_file);
FMB_IO_Scan_header(&nb_bodies, ¢er, &half_side);
if (INFO_DISPLAY(1)){
fprintf(f_output, "Bodies number: ");
fprintf(f_output, FORMAT_BODIES_IND_T, nb_bodies);
fprintf(f_output, "\n");
fflush(f_output);
}
/* Allocation des masses */
p_allvalues = FMB_malloc_with_check(nb_bodies * sizeof(VALUES_T));
/* Positionnement du pointeur des masses courantes */
p_values = p_allvalues;
current_values = p_allvalues;
/* nombre local de corps */
nb_bodies /= nb_proc;
/* On envoie le nombre de corps */
MPI_Bcast(&nb_bodies, 1, MPI_LONG, 0, MPI_COMM_WORLD);
/* On initialise les corps et les buffers Pj et spd */
bodies_Initialize(&bodies, nb_bodies);
Direct_method_Pj_Initialize(nb_bodies);
spd = FMB_malloc_with_check(nb_bodies * sizeof(position_t));
/* Pour chaque processus */
for (i = 1; i <= nb_proc; ++i)
{
/* On remplit bodies des nb_bodies corps suivants */
bodies.nb_bodies = 0;
bodies.p_values = current_values; /* On place le pointeur de masse au bon endroit du buffer */
for (k=0; k<nb_bodies; ++k)
{
body_t body_tmp;
body_Initialize(&body_tmp);
if (FMB_IO_Scan_body(&body_tmp) != 1)
FMB_error("In Direct_method_Data(): FMB_IO_Scan_body() failed for body #%i\n", k);
/* if (k<100){ body_Display(&body_tmp, f_output); } */
bodies_Add(&bodies, &body_tmp);
}
/* On les envoie (en asynchrone) */
if (i != 1)
{
MPI_Wait(id,NULL); MPI_Wait(id+1,NULL);
MPI_Wait(id+2,NULL); MPI_Wait(id+3,NULL);
}
if (i != nb_proc)
{
SWAP(bodies.p_pos_x, bodies.p_pos_y, bodies.p_pos_z, bodies.p_speed_vectors,
pj_pos_x, pj_pos_y, pj_pos_z, spd, swap);
Direct_method_Data_Send(i, spd, id);
}
current_values += nb_bodies;
}
/* Du coup le processus 0 conserve les derniers corps. Il aurait été malin que ce soit le processus nb_proc - 1 qui fasse la lecture. */
/* Chaque processus doit récuperer l'ensemble des masses. */
MPI_Bcast(p_allvalues, nb_bodies * nb_proc, MY_MPI_F, 0, MPI_COMM_WORLD);
bodies_ClearFP(&bodies);
FMB_free(spd);
/* Terminate Input operations: */
FMB_IO_TerminateI();
}
/*********************************************************************************************
********************************************************************************************
**********************************************************************************************
Direct_method_Data_bodies
**********************************************************************************************
*********************************************************************************************/
/* Same as Direct_method_Data() but we use the position and values
* of all bodies stored in 'p_b' (instead of the bodies stored
* in the file "data_file" in Direct_method_Data()). */
/* Same as Direct_method_Data() but takes the bodies from 'p_b' instead of
 * reading them from a file: copies every body of p_b into the global
 * 'bodies' and clears its force/potential accumulators. */
void Direct_method_Data_bodies(bodies_t *p_b){
  bodies_it_t it;

  bodies_Initialize(&bodies, bodies_Nb_bodies(p_b));
  bodies_it_Initialize(&it, p_b);
  while (bodies_it_Is_valid(&it)) {
    body_t current;
    bodies_it_Get_body(&it, &current);
    bodies_Add(&bodies, &current);
    bodies_it_Go2Next(&it);
  }
  bodies_ClearFP(&bodies);
}
/*********************************************************************************************
**********************************************************************************************
Direct_method_Compute
**********************************************************************************************
*********************************************************************************************/
/* First stage of the pipelined direct computation: the mutual interactions
 * among the bodies owned by this process. */
void Direct_method_Compute_First(){
  /* Compute the force and the potential, using reciprocity (each pair is
   * evaluated once and applied to both bodies);
   * bodies_Compute_own_interaction_no_mutual() is not implemented. */
  bodies_Compute_own_interaction(&bodies);
  /* Optional global scaling of the interactions (could also be applied to
   * the total potential energy only): */
#ifdef _USE_CONSTANT_INTERACTION_FACTOR_
  bodies_Scale_with_CONSTANT_INTERACTION_FACTOR(&bodies);
#endif /* #ifdef _USE_CONSTANT_INTERACTION_FACTOR_ */
}
/* Computation pour les blocs intermédiaire */
/* Middle stage of the pipeline: interactions between the local bodies and
 * the remote block currently staged in the Pj buffers, with the masses at
 * the p_values cursor.  Advances the cursor one slice and wraps it after
 * all nb_proc slices have been consumed. */
void Direct_method_Compute_Mid()
{
  bodies_Compute_other_interaction(&bodies, pj_pos_x, pj_pos_y, pj_pos_z, pj_fx, pj_fy, pj_fz, p_values);
  p_values += bodies.nb_bodies;
  if (p_values - p_allvalues == bodies.nb_bodies * nb_proc)
    p_values = p_allvalues;
}
/* Computation pour le demi-bloc final */
/* Final stage of the pipeline: the half block of interactions between the
 * local bodies and the staged Pj set. */
void Direct_method_Compute_Last()
{
  /* Rank 0 acts as rank nb_proc in the ring ordering. */
  int rank = (my_rank == 0 ? nb_proc : my_rank);
  /* The last argument selects which half of the pair interactions this
   * rank computes, split at nb_proc/2 so each half is covered exactly once.
   * NOTE(review): the exact half semantics live in
   * bodies_Compute_other_half_interaction() — verify there. */
  bodies_Compute_other_half_interaction(&bodies, pj_pos_x, pj_pos_y, pj_pos_z, pj_fx, pj_fy, pj_fz, p_values, rank <= (nb_proc / 2) ? 0 : 1);
}
/*********************************************************************************************
**********************************************************************************************
************************* Move of the bodies: ************************************************
Direct_method_Move : Leapfrog integrator ( Kick Drift Kick )
**********************************************************************************************
*********************************************************************************************/
/* One fused leapfrog kick-and-drift step: each body receives a half-step
 * velocity kick from the accumulated forces, then a full-step position
 * drift with the updated velocity. */
void KnD_Direct_method_Move(REAL_T dt ){
  bodies_ind_t k;
#pragma omp parallel for schedule(static)
  for (k = 0; k < bodies.nb_bodies; ++k)
  {
    /* Kick: v += (F / m) * dt/2. */
    const VALUES_T inv_mass = 1 / bodies.p_values[k];
    bodies.p_speed_vectors[k].dat[0] += bodies.p_fx[k] * inv_mass * (dt / 2);
    bodies.p_speed_vectors[k].dat[1] += bodies.p_fy[k] * inv_mass * (dt / 2);
    bodies.p_speed_vectors[k].dat[2] += bodies.p_fz[k] * inv_mass * (dt / 2);
    /* Drift: x += v * dt. */
    bodies.p_pos_x[k] += bodies.p_speed_vectors[k].dat[0] * dt;
    bodies.p_pos_y[k] += bodies.p_speed_vectors[k].dat[1] * dt;
    bodies.p_pos_z[k] += bodies.p_speed_vectors[k].dat[2] * dt;
  }
}
/* Half-step velocity kick only (the closing kick of the leapfrog scheme):
 * v += (F / m) * dt/2 for every local body; positions are untouched. */
void K_Direct_method_Move(REAL_T dt ){
  bodies_ind_t k;
#pragma omp parallel for schedule(static)
  for (k = 0; k < bodies.nb_bodies; ++k)
  {
    const VALUES_T inv_mass = 1 / bodies.p_values[k];
    bodies.p_speed_vectors[k].dat[0] += bodies.p_fx[k] * inv_mass * (dt / 2);
    bodies.p_speed_vectors[k].dat[1] += bodies.p_fy[k] * inv_mass * (dt / 2);
    bodies.p_speed_vectors[k].dat[2] += bodies.p_fz[k] * inv_mass * (dt / 2);
  }
}
/*********************************************************************************************
**********************************************************************************************
Direct_method_Terminate
**********************************************************************************************
*********************************************************************************************/
/* Releases everything owned by the direct method: the bodies, the global
 * mass array, and the data file name (recompressing the data file first if
 * it was unpacked from a .bz2). */
void Direct_method_Terminate(){
  bodies_Free(&bodies);
  FMB_free(p_allvalues);
  if (Direct_are_data_bzipped2){
    /* We recompress the data file: */
    bzip2_file(Direct_data_file);
  }
  FMB_free(Direct_data_file);
}
/* Variant of Direct_method_Terminate() that does not free 'p_allvalues'.
 * NOTE(review): presumably used when the mass array is owned elsewhere —
 * confirm against the callers. */
void Direct_method_Terminate2(){
  bodies_Free(&bodies);
  if (Direct_are_data_bzipped2){
    bzip2_file(Direct_data_file);
  }
  FMB_free(Direct_data_file);
}
/*********************************************************************************************
**********************************************************************************************
sum
**********************************************************************************************
*********************************************************************************************/
/* Sums the force components of all local bodies, reduces the sums across
 * ranks and prints the global total on rank 0 (a near-zero total is a
 * sanity check of the mutual interactions).  'results_file',
 * 'step_number_value' and 'total_potential_energy' are currently unused;
 * output goes to the global 'f_output'.
 * NOTE(review): the MPI_Reduce of 3 MY_MPI_F elements assumes position_t
 * is exactly three contiguous COORDINATES_T — verify its definition. */
void Direct_method_Sum(char *results_file,
                       unsigned long step_number_value,
                       bodies_t *p_bodies,
                       VALUES_T total_potential_energy){
  FILE *f_results;
  position_t force_sum;
  position_t force_sum_total;
  bodies_it_t it;

  f_results = f_output;
  position_Initialize(&force_sum);
  position_Initialize(&force_sum_total);
  /* Accumulate the force components of every local body: */
  for (bodies_it_Initialize(&it, p_bodies);
       bodies_it_Is_valid(&it);
       bodies_it_Go2Next(&it)){
    position_Set_x(&force_sum, position_Get_x(&force_sum) + bodies_it_Get_fx(&it));
    position_Set_y(&force_sum, position_Get_y(&force_sum) + bodies_it_Get_fy(&it));
    position_Set_z(&force_sum, position_Get_z(&force_sum) + bodies_it_Get_fz(&it));
  }
  MPI_Reduce(&force_sum, &force_sum_total, 3, MY_MPI_F, MPI_SUM, 0, MPI_COMM_WORLD);
  if (my_rank == 0)
  {
    fprintf(f_results, "Sum (force): ");
    position_Display(&force_sum_total, f_results, high);
    fprintf(f_results, "\n");
  }
}
/*********************************************************************************************
**********************************************************************************************
save
**********************************************************************************************
*********************************************************************************************/
/* Writes the bodies of 'p_bodies' to 'results_filename' in the currently
 * configured output format (FMB ASCII or NEMO; FMB binary has no header
 * writer and aborts).
 * NOTE(review): the header announces bodies_Nb_bodies(p_bodies) * nb_proc
 * bodies while only the local bodies are printed — confirm that each rank
 * writes its own file or that the caller gathers first. */
void Direct_method_Dump_bodies(char *results_filename,
                               unsigned long step_number_value,
                               bodies_t *p_bodies)
{
  bodies_it_t it;

  /* Initialize Output operations: */
  FMB_IO_InitO(results_filename);
  if (FMB_IO_Info.output_format != NEMO_format){
    /********** FMB file format: **********/
    if (FMB_IO_Info.output_format == FMB_binary_format){
      FMB_error("Unable to write the 'header' for FMB_binary_format in Direct_method_Dump_bodies(). \n");
    }
    FMB_IO_Print_header(step_number_value, FALSE /* only_position_and_value */,
                        bodies_Nb_bodies(p_bodies) * nb_proc, &center, half_side);
    for (bodies_it_Initialize(&it, p_bodies);
         bodies_it_Is_valid(&it);
         bodies_it_Go2Next(&it)){
      FMB_IO_Print_body_from_bodies_it(&it, FALSE /* only_position_and_value */);
    } /* for */
  } /* if (FMB_IO_Info.output_format != NEMO_format) */
  else {
    /********** NEMO file format: **********/
    FMB_IO_Print_all_bodies_from_bodies_t(p_bodies);
  } /* else (FMB_IO_Info.output_format != NEMO_format) */
  /* Terminate Output operations: */
  FMB_IO_TerminateO();
}
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% John Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageBoundingBox() returns the smallest rectangle enclosing all pixels
  that differ from the border colors.  The three 'target' colors are sampled
  from the top-left, top-right and bottom-left corners; rows are scanned in
  parallel, each thread shrinking a private copy of the box that is merged
  into 'bounds' under a named critical section.
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    target[3],
    zero;

  RectangleInfo
    bounds;

  register const PixelPacket
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Start from a degenerate box: x/y at the far corner, width/height 0. */
  bounds.width=0;
  bounds.height=0;
  bounds.x=(ssize_t) image->columns;
  bounds.y=(ssize_t) image->rows;
  GetMagickPixelPacket(image,&target[0]);
  image_view=AcquireCacheView(image);
  /* target[0]: top-left corner color (bounds the left and top edges). */
  p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (p == (const PixelPacket *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
    &target[0]);
  /* target[1]: top-right corner color (bounds the right edge). */
  GetMagickPixelPacket(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
    &target[1]);
  /* target[2]: bottom-left corner color (bounds the bottom edge). */
  GetMagickPixelPacket(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
    &target[2]);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    RectangleInfo
      bounding_box;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Snapshot the shared box under the same critical section as the merge. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((x < bounding_box.x) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      p++;
    }
    /* Merge this row's private box into the shared result. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /* Convert inclusive max coordinates into width/height. */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDepth() returns the depth of a particular image channel.
%
% The format of the GetImageChannelDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
% size_t GetImageChannelDepth(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* GetImageDepth() returns the image depth considering all channels. */
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  return(GetImageChannelDepth(image,AllChannels,exception));
}
/*
  GetImageChannelDepth() returns the smallest bit depth (1 up to
  MAGICKCORE_QUANTUM_DEPTH) that represents the selected channels of every
  pixel (or colormap entry) without loss.  A per-thread running depth is
  kept in current_depth[] and merged at the end.
*/
MagickExport size_t GetImageChannelDepth(const Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    id;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  number_threads=GetOpenMPMaximumThreads();
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (id=0; id < (ssize_t) number_threads; id++)
    current_depth[id]=1;
  if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse))
    {
      /*
        PseudoClass without alpha: the colormap alone determines the depth.
        Each iteration indexes the colormap by i; the original advanced a
        shared pointer with p++ inside the parallel for — a data race that
        read the wrong entries when OpenMP was enabled.
      */
      register const PixelPacket
        *restrict p;

      register ssize_t
        i;

      p=image->colormap;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        if (status == MagickFalse)
          continue;
        /* Raise this thread's depth until the entry round-trips losslessly. */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickStatusType
            status;

          QuantumAny
            range;

          status=0;
          range=GetQuantumRange(current_depth[id]);
          if ((channel & RedChannel) != 0)
            status|=p[i].red != ScaleAnyToQuantum(ScaleQuantumToAny(p[i].red,
              range),range);
          if ((channel & GreenChannel) != 0)
            status|=p[i].green != ScaleAnyToQuantum(ScaleQuantumToAny(
              p[i].green,range),range);
          if ((channel & BlueChannel) != 0)
            status|=p[i].blue != ScaleAnyToQuantum(ScaleQuantumToAny(p[i].blue,
              range),range);
          if (status == 0)
            break;
          current_depth[id]++;
        }
      }
      /* Merge the per-thread depths. */
      depth=current_depth[0];
      for (id=1; id < (ssize_t) number_threads; id++)
        if (depth < current_depth[id])
          depth=current_depth[id];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Raise this thread's depth until the pixel round-trips losslessly. */
      while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
      {
        MagickStatusType
          status;

        QuantumAny
          range;

        status=0;
        range=GetQuantumRange(current_depth[id]);
        if ((channel & RedChannel) != 0)
          status|=p->red != ScaleAnyToQuantum(ScaleQuantumToAny(p->red,range),
            range);
        if ((channel & GreenChannel) != 0)
          status|=p->green != ScaleAnyToQuantum(ScaleQuantumToAny(p->green,
            range),range);
        if ((channel & BlueChannel) != 0)
          status|=p->blue != ScaleAnyToQuantum(ScaleQuantumToAny(p->blue,range),
            range);
        if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
          status|=p->opacity != ScaleAnyToQuantum(ScaleQuantumToAny(p->opacity,
            range),range);
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          status|=indexes[x] != ScaleAnyToQuantum(ScaleQuantumToAny(indexes[x],
            range),range);
        if (status == 0)
          break;
        current_depth[id]++;
      }
      p++;
    }
    /* Maximum depth reached: no further rows can raise it, stop early. */
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* Merge the per-thread depths. */
  depth=current_depth[0];
  for (id=1; id < (ssize_t) number_threads; id++)
    if (depth < current_depth[id])
      depth=current_depth[id];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, or 32.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
/* Returns the smaller of two doubles. */
static inline double MagickMin(const double x,const double y)
{
  return(x < y ? x : y);
}
/*
  GetImageQuantumDepth() rounds the image depth up to the next legal
  quantum depth (8, 16, 32 or 64; a depth above 64 is returned unchanged).
  When 'constrain' is set, the result is additionally capped at
  MAGICKCORE_QUANTUM_DEPTH.
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  static const size_t
    legal[] = { 8, 16, 32, 64 };

  size_t
    depth,
    i;

  depth=image->depth;
  for (i=0; i < sizeof(legal)/sizeof(legal[0]); i++)
    if (depth <= legal[i])
      {
        depth=legal[i];
        break;
      }
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,GetImageType(image));
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
  /*
    Classify the image into one of the nine potential types.  The Is*Image
    probes are ordered from the most restrictive class to the least; note
    they may cache their verdict on the image as a side effect.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    return(image->matte == MagickFalse ? ColorSeparationType :
      ColorSeparationMatteType);
  if (IsMonochromeImage(image,exception) != MagickFalse)
    return(BilevelType);
  if (IsGrayImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? GrayscaleMatteType : GrayscaleType);
  if (IsPaletteImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? PaletteMatteType : PaletteType);
  return(image->matte != MagickFalse ? TrueColorMatteType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s G r a y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsGrayImage() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities.
%
% The format of the IsGrayImage method is:
%
% MagickBooleanType IsGrayImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
ExceptionInfo *exception)
{
/*
  Return MagickTrue when every pixel has equal red, green, and blue
  intensities.  On success, also refines image->type to BilevelType,
  GrayscaleType, or GrayscaleMatteType (via a const cast).
*/
CacheView
*image_view;
ImageType
type;
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Trust a previously classified gray/bilevel type without rescanning. */
if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
(image->type == GrayscaleMatteType))
return(MagickTrue);
if (image->colorspace == CMYKColorspace)
return(MagickFalse);
/* Start optimistic: assume bilevel until a gray-but-not-0/max pixel
   downgrades to grayscale, or a colored pixel disproves grayness. */
type=BilevelType;
image_view=AcquireCacheView(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsGrayPixel(p) == MagickFalse)
{
/* A colored pixel disproves grayness; stop scanning entirely. */
type=UndefinedType;
break;
}
if ((type == BilevelType) && (IsMonochromePixel(p) == MagickFalse))
type=GrayscaleType;
p++;
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if (type == UndefinedType)
return(MagickFalse);
/* Cache the discovered type on the (logically) const image. */
((Image *) image)->type=type;
if ((type == GrayscaleType) && (image->matte != MagickFalse))
((Image *) image)->type=GrayscaleMatteType;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M o n o c h r o m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMonochromeImage() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IsMonochromeImage method is:
%
% MagickBooleanType IsMonochromeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Return MagickTrue when every pixel is gray with intensity of exactly
    0 or QuantumRange.  On success, caches BilevelType on the image.
  */
  CacheView
    *image_view;

  MagickBooleanType
    bilevel;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);
  if (image->colorspace == CMYKColorspace)
    return(MagickFalse);
  bilevel=MagickTrue;
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsMonochromePixel(p) == MagickFalse)
        {
          bilevel=MagickFalse;
          break;
        }
      p++;
    }
    if (bilevel == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (bilevel == MagickFalse)
    return(MagickFalse);
  /* Cache the discovered type on the (logically) const image. */
  ((Image *) image)->type=BilevelType;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
% an opacity value other than opaque (0).
%
% The format of the IsOpaqueImage method is:
%
% MagickBooleanType IsOpaqueImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Return MagickTrue when no pixel carries an opacity value other than
    OpaqueOpacity, i.e. the image is fully opaque.
  */
  CacheView
    *image_view;

  MagickBooleanType
    opaque;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->matte == MagickFalse)
    return(MagickTrue);  /* no alpha channel: trivially opaque */
  opaque=MagickTrue;
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        /* pixel-cache failure: report not-opaque, as the original did */
        opaque=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (p->opacity != OpaqueOpacity)
        {
          opaque=MagickFalse;
          break;
        }
      p++;
    }
    if (opaque == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  return(opaque);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelDepth() sets the depth of the image.
%
% The format of the SetImageChannelDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth)
% MagickBooleanType SetImageChannelDepth(Image *image,
% const ChannelType channel,const size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
*/
/*
  Set the depth of the image; convenience wrapper around
  SetImageChannelDepth() applied to all channels.
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
const size_t depth)
{
return(SetImageChannelDepth(image,AllChannels,depth));
}
/*
  Set the depth of the selected channels by quantizing every pixel (and,
  for PseudoClass images, every colormap entry) to `depth` significant bits.

  Returns MagickTrue on success; MagickFalse if the pixel cache could not
  be read or synchronized.
*/
MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
  const ChannelType channel,const size_t depth)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (GetImageDepth(image,&image->exception) <= (size_t)
      MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH))
    {
      /*
        Pixels already fit within the requested depth; just record it.
      */
      image->depth=depth;
      return(MagickTrue);
    }
  /*
    Scale pixels to the desired depth.
  */
  status=MagickTrue;
  range=GetQuantumRange(depth);
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        q->red=ScaleAnyToQuantum(ScaleQuantumToAny(q->red,range),range);
      if ((channel & GreenChannel) != 0)
        q->green=ScaleAnyToQuantum(ScaleQuantumToAny(q->green,range),range);
      if ((channel & BlueChannel) != 0)
        q->blue=ScaleAnyToQuantum(ScaleQuantumToAny(q->blue,range),range);
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        q->opacity=ScaleAnyToQuantum(ScaleQuantumToAny(q->opacity,range),range);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        indexes[x]=ScaleAnyToQuantum(ScaleQuantumToAny(indexes[x],range),range);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        /*
          Index the colormap entry from the loop counter.  The previous
          implementation advanced a pointer shared across OpenMP threads
          (p++ inside a parallel for), which raced and could scale the
          wrong entries when parallelized.
        */
        register PixelPacket
          *restrict p;

        p=image->colormap+i;
        if ((channel & RedChannel) != 0)
          p->red=ScaleAnyToQuantum(ScaleQuantumToAny(p->red,range),range);
        if ((channel & GreenChannel) != 0)
          p->green=ScaleAnyToQuantum(ScaleQuantumToAny(p->green,range),range);
        if ((channel & BlueChannel) != 0)
          p->blue=ScaleAnyToQuantum(ScaleQuantumToAny(p->blue,range),range);
        if ((channel & OpacityChannel) != 0)
          p->opacity=ScaleAnyToQuantum(ScaleQuantumToAny(p->opacity,range),
            range);
      }
    }
  image->depth=depth;
  return(status);
}
|
wyhash.h | /* Author: Wang Yi <godspeed_china@yeah.net> */
#ifndef wyhash_version_3
#define wyhash_version_3
#include <stdint.h>
#include <string.h>
#if defined(_MSC_VER) && defined(_M_X64)
#include <intrin.h>
#pragma intrinsic(_umul128)
#endif
const uint64_t _wyp0=0xa0761d6478bd642full, _wyp1=0xe7037ed1a0b428dbull, _wyp2=0x8ebc6af09c88c6e3ull, _wyp3=0x589965cc75374cc3ull, _wyp4=0x1d8e4e27c47d124full;
static inline uint64_t _wyrotr(uint64_t v, unsigned k) { return (v>>k)|(v<<(64-k)); }
/* 64x64 -> 128-bit multiply folded to 64 bits by XORing the high and low
   halves (the "MUM" mix at the heart of wyhash). */
static inline uint64_t _wymum(uint64_t A, uint64_t B) {
#ifdef WYHASH32
/* 32-bit-only build: synthesize the wide product from four 32x32 partials. */
uint64_t hh=(A>>32)*(B>>32), hl=(A>>32)*(unsigned)B, lh=(unsigned)A*(B>>32), ll=(uint64_t)(unsigned)A*(unsigned)B;
return _wyrotr(hl,32)^_wyrotr(lh,32)^hh^ll;
#else
#ifdef __SIZEOF_INT128__
/* Native 128-bit integers (GCC/Clang). */
__uint128_t r=A; r*=B; return (r>>64)^r;
#elif defined(_MSC_VER) && defined(_M_X64)
/* MSVC x64 intrinsic: _umul128 returns the low half, stores the high in B. */
A=_umul128(A, B, &B); return A^B;
#else
/* Portable fallback: schoolbook long multiplication with carry propagation. */
uint64_t ha=A>>32, hb=B>>32, la=(uint32_t)A, lb=(uint32_t)B, hi, lo;
uint64_t rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t<rl;
lo=t+(rm1<<32); c+=lo<t;hi=rh+(rm0>>32)+(rm1>>32)+c; return hi^lo;
#endif
#endif
}
static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return v; }
static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v; }
static inline uint64_t _wyr3(const uint8_t *p, unsigned k) { return (((uint64_t)p[0])<<16)|(((uint64_t)p[k>>1])<<8)|p[k-1]; }
/* wyhash v3: hash `len` bytes at `key` with `seed`.  Short inputs take a
   size-bucketed fast path; long inputs are consumed in 256-byte blocks over
   two accumulators (seed, see1), then a 32-byte loop, then a bucketed tail. */
static inline uint64_t wyhash(const void* key, uint64_t len, uint64_t seed) {
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
if(__builtin_expect(!len,0)) return _wymum(_wymum(seed^_wyp0,seed^_wyp1),_wyp4);
#else
if(!len) return _wymum(_wymum(seed^_wyp0,seed^_wyp1),_wyp4);
#endif
const uint8_t *p=(const uint8_t*)key;
/* Inputs of at most 32 bytes: one or two MUM mixes chosen by length. */
if(len<4) return _wymum(_wymum(_wyr3(p,len)^seed^_wyp0,seed^_wyp1),len^_wyp4);
else if(len<=8) return _wymum(_wymum(_wyr4(p)^seed^_wyp0,_wyr4(p+len-4)^seed^_wyp1),len^_wyp4);
else if(len<=16) return _wymum(_wymum(_wyr8(p)^seed^_wyp0,_wyr8(p+len-8)^seed^_wyp1),len^_wyp4);
else if(len<=24) return _wymum(_wymum(_wyr8(p)^seed^_wyp0,_wyr8(p+8)^seed^_wyp1)^_wymum(_wyr8(p+len-8)^seed^_wyp2,seed^_wyp3),len^_wyp4);
else if(len<=32) return _wymum(_wymum(_wyr8(p)^seed^_wyp0,_wyr8(p+8)^seed^_wyp1)^_wymum(_wyr8(p+16)^seed^_wyp2,_wyr8(p+len-8)^seed^_wyp3),len^_wyp4);
uint64_t see1=seed, i=len;
/* Bulk loop: 256 bytes per iteration, alternating the two accumulators. */
if(i>=256) for(; i>=256; i-=256,p+=256) {
seed=_wymum(_wyr8(p)^seed^_wyp0,_wyr8(p+8)^seed^_wyp1)^_wymum(_wyr8(p+16)^seed^_wyp2,_wyr8(p+24)^seed^_wyp3);
see1=_wymum(_wyr8(p+32)^see1^_wyp1,_wyr8(p+40)^see1^_wyp2)^_wymum(_wyr8(p+48)^see1^_wyp3,_wyr8(p+56)^see1^_wyp0);
seed=_wymum(_wyr8(p+64)^seed^_wyp0,_wyr8(p+72)^seed^_wyp1)^_wymum(_wyr8(p+80)^seed^_wyp2,_wyr8(p+88)^seed^_wyp3);
see1=_wymum(_wyr8(p+96)^see1^_wyp1,_wyr8(p+104)^see1^_wyp2)^_wymum(_wyr8(p+112)^see1^_wyp3,_wyr8(p+120)^see1^_wyp0);
seed=_wymum(_wyr8(p+128)^seed^_wyp0,_wyr8(p+136)^seed^_wyp1)^_wymum(_wyr8(p+144)^seed^_wyp2,_wyr8(p+152)^seed^_wyp3);
see1=_wymum(_wyr8(p+160)^see1^_wyp1,_wyr8(p+168)^see1^_wyp2)^_wymum(_wyr8(p+176)^see1^_wyp3,_wyr8(p+184)^see1^_wyp0);
seed=_wymum(_wyr8(p+192)^seed^_wyp0,_wyr8(p+200)^seed^_wyp1)^_wymum(_wyr8(p+208)^seed^_wyp2,_wyr8(p+216)^seed^_wyp3);
see1=_wymum(_wyr8(p+224)^see1^_wyp1,_wyr8(p+232)^see1^_wyp2)^_wymum(_wyr8(p+240)^see1^_wyp3,_wyr8(p+248)^see1^_wyp0);
}
/* Drain remaining 32-byte chunks. */
for(; i>=32; i-=32,p+=32) { seed=_wymum(_wyr8(p)^seed^_wyp0,_wyr8(p+8)^seed^_wyp1); see1=_wymum(_wyr8(p+16)^see1^_wyp2,_wyr8(p+24)^see1^_wyp3); }
/* Tail of 0..31 bytes, bucketed by size as in the short-input cases. */
if(!i) {}
else if(i<4) seed=_wymum(_wyr3(p,i)^seed^_wyp0,seed^_wyp1);
else if(i<=8) seed=_wymum(_wyr4(p)^seed^_wyp0,_wyr4(p+i-4)^seed^_wyp1);
else if(i<=16) seed=_wymum(_wyr8(p)^seed^_wyp0,_wyr8(p+i-8)^seed^_wyp1);
else if(i<=24) { seed=_wymum(_wyr8(p)^seed^_wyp0,_wyr8(p+8)^seed^_wyp1); see1=_wymum(_wyr8(p+i-8)^see1^_wyp2,see1^_wyp3); }
else { seed=_wymum(_wyr8(p)^seed^_wyp0,_wyr8(p+8)^seed^_wyp1); see1=_wymum(_wyr8(p+16)^see1^_wyp2,_wyr8(p+i-8)^see1^_wyp3); }
/* Final fold of both accumulators with the length. */
return _wymum(seed^see1,len^_wyp4);
}
static inline uint64_t wyhash64(uint64_t A, uint64_t B) { return _wymum(_wymum(A^_wyp0, B^_wyp1), _wyp2); }
static inline double wy2u01(uint64_t r) { const double _wynorm=1.0/(1ull<<52); return (r>>11)*_wynorm; }
static inline float wy2gau(uint64_t r) { const float _wynorm1=1.0f/(1ull<<15); return (((r>>16)&0xffff)+((r>>32)&0xffff)+(r>>48))*_wynorm1-3.0f; }
static inline uint64_t wyrand(uint64_t *seed) { *seed+=_wyp0; return _wymum(*seed^_wyp1,*seed); }
/* Process-wide state for the seedable generator below; wygrand() guards
   its update with an OpenMP atomic when OpenMP is enabled. */
static uint64_t _wyrand_seed=0;
/* Largest value wygrand() can return. */
#define WYRAND_MAX 0xffffffffffffffffull
/* Set the global generator seed. */
static inline void wysrand(uint64_t seed) { _wyrand_seed=seed; }
/* Global-state variant of wyrand(): advance the shared seed (atomically
   when compiled with OpenMP) and mix the captured value into a word. */
static inline uint64_t wygrand(void) {
uint64_t s;
#if defined(_OPENMP)
#pragma omp atomic capture
#endif
{
/* capture: increment the shared seed and read the new value as one step */
_wyrand_seed+=_wyp0; s=_wyrand_seed;
}
return _wymum(s^_wyp1,s);
}
#endif
|
graphProcessingSgIncGraph.h | /*
FINISH TEMPFLATPATH CODE
*/
// Original Author (SgGraphTraversal mechanisms): Michael Hoffman
//$id$
#include<omp.h>
#include <boost/regex.hpp>
#include <iostream>
#include <fstream>
#include <string>
/**
*@file graphProcessing.h
*Brief Overview of Algorithm:
***********************
*Current Implementation
***********************
*The idea behind the algorithm here is to decompose the given Control Flow Graph into a Tree structure (still stored as a Control Flow Graph, though it may be possible to change this). This tree splits on children of a graph node, but does not connect in the case of multiple parents of a single node. In this we refer to out nodes as "children" and in nodes as "parents". However, we do this from the end node to the start node. This is best because then you get one value on the end node, and that value is the only one we want (however, the function takes a pointer to an empty SgIncidenceDirectedGraph as an argument, this can then be analyzed post traversal if that is wanted.
*Also, following the algorithm explained above, one must realize that the tree has one leaf for EACH path. Thus small programs can lead from a small-ish graph to a VERY large tree. For example, a program with 20 if else statements would end with 2^20 paths, which means the number of nodes in the tree is greater than this. Thus with 32 or 64 you can overflow a 32 or 64 bit integer.
*However, this can be partially resolved by setting the deleteTree boolean to true. This is set to false as the default, however if you don't need the tree then deleteTree will allow you to deal with much larger trees and thus much larger original graphs.
*Realize that because of the potentially massive growth rate of path number, that in large cases path enumeration and counting is extremely prohibitive, as it is not difficult to create a program which has more paths than 2^64, thus an unsigned (signed?) 64 bit integer could not store the number.
*Further, this is still a relatively compact method. You could just as easily force enumeration of paths, which could in some cases drastically increase the number of nodes necessary to store all the information.
*The advantage of the tree structure is two-fold. First it relieves the algorithm of having to keep even more in memory than it has to at the moment, and if you want to deal with GoTo statements, one case can develop that cannot be solved otherwise, e.g.
*Consider the four node tree with nodes a, b, c, d, and edges atb, atc, btc, ctb, btd, ctd. There are FOUR legitimate paths here (a, b, d; a, b, c, d; a, c, d; a, c, b, d), and any other method would recognize this as a loop and, without a special algorithm for loop behavior, this would be ignored.
*The tree structure also allows for very strong parallelization, currently implemented in openMP. This is because depth has meaning in terms of a tree (it doesn't in terms of a general graph) and that you know you can evaluate all nodes at a certain depth if you have solved all the nodes at depth + 1.
**************************
*Further Improvements: TODO
**************************
@todo *One improvement that should be implemented ASAP is changing the algorithm from a recursive algorithm to an iterative algorithm. Keeping the memory requirements down is much easier in this form and would probably increase the size of graph that the algorithm can handle.
@todo *Another improvement that should be implemented when possible is to allow for loop analysis. This could be implemented by simply running the algorithm on the loop, but there would need to be a provision that kept the algorithm from stopping as soon as it starts. This could be done by separating the node into two nodes, one with all the inedges and one with all the outedges. OR one could collect the loops when they are deleted (the whole loop is calculated necessarily), though nested loops would have to be considered further in order to find a way to deal with them.
@todo *It is possible that graph matching algorithms might prove useful to distinguish different types of graphs contained within the CFG and optimize traversal over them. Look up graph matching algorithms or pattern matching (potentially) for more information on such algorithms, though I do not believe there is an existing literature on matching subgraphs for this purpose.
@todo *The parallelism in this program should be optimized by someone experienced in parallelization optimization
***************
*Contact Info
***************
*Finally, blame can be assigned to and questions can be forwarded to the author, though response is not guaranteed
*archangel DOT associate AT gmail DOT com
*or, if I'm still at Lawrence
*hoffman34 AT llnl DOT gov
*@author Michael Hoffman
*/
#include "staticCFG.h"
#include <vector>
#include <algorithm>
#include <utility>
#include <iostream>
#include <sys/time.h>
#include <sys/resource.h>
//#include "graphBot.h"
//This is necessary for technical reasons with regards to the graphnodeinheritedmap
/* Scratch state for a path-walking "bot".
   NOTE(review): field roles are inferred from names only — confirm against
   the (commented-out) GraphBot code before relying on them. */
struct Bot {
// presumably the enumerated candidate paths — TODO confirm
std::vector<std::vector<SgGraphNode*> > path;
// presumably the loop nodes encountered on each path — TODO confirm
std::vector<std::set<SgGraphNode*> > pthloops;
// the path currently being constructed
std::vector<SgGraphNode*> currpth;
// per-node bookkeeping: node paired with an integer tag
std::vector<std::pair<SgGraphNode*, int> > nodelst;
bool on;
bool remove;
};
// Elapsed seconds from `begin` to `end` (timeval seconds + microseconds).
double timeDifference(const struct timeval& end, const struct timeval& begin)
{
    const double endSeconds = end.tv_sec + end.tv_usec / 1.0e6;
    const double beginSeconds = begin.tv_sec + begin.tv_usec / 1.0e6;
    return endSeconds - beginSeconds;
}
// User-mode CPU time consumed so far by this process, as a timeval.
static inline timeval getCPUTime() {
    rusage usage;
    getrusage(RUSAGE_SELF, &usage);
    return usage.ru_utime;
}
/* Pointer-identity predicate for SgGraphNode*.
   NOTE(review): despite the name, operator() tests EQUALITY (a==b), not an
   ordering.  It is only valid as an equality predicate (e.g. for hashed
   containers); using it as a std::map/std::set comparator would violate
   strict weak ordering.  Confirm intended usage before changing. */
struct compareSgGraphNode {
bool operator()(const SgGraphNode* a, const SgGraphNode* b) const
{
return a==b;
}
};
/* The SgGraphTraversal class is utilized specifically for StaticCFG traversals,
though the input must be in terms of a SgIncidenceDirectedGraph*/
// Generic traversal over a CFG stored as an SgIncidenceDirectedGraph.
// Users subclass and implement evaluateInheritedAttribute / pathAnalyze;
// traverse() decomposes the graph, propagates inherited attributes, and
// invokes pathAnalyze on each enumerated path.
template <class InheritedAttributeType, class SynthesizedAttributeType>
class SgGraphTraversal
{
public:
std::set<std::map<int, std::set<int> > > subpathmap;
int loopNum;
int nullNum;
std::set<SgDirectedGraphEdge*> nullEdgesOrdered;
std::map<SgGraphNode*, int> loopNumMap;
std::map<SgGraphNode*, int> pathValMap;
int nullloops;
// Paths that form loops, and loops nested inside other loops.
std::vector<std::vector<SgGraphNode*> > looppaths;
std::vector<std::vector<SgGraphNode*> > iLoops;
std::vector<SgGraphNode*> ifstatements;
virtual ~SgGraphTraversal();
SgGraphTraversal();
// Copy operations
int nullEdgesPaths;
int turns;
SgGraphTraversal(const SgGraphTraversal &);
const SgGraphTraversal &operator=(const SgGraphTraversal &);
//This is not used, but will be important if SynthesizedAttributes become useful
typedef StackFrameVector<SynthesizedAttributeType> SynthesizedAttributesList;
//one of the most important structures in the algorithm, this attaches SgGraphNode*s to InheritedAttributeTypes so that
//looking up the values is possible.
//int numnodes;
//std::map<SgGraphNode*, InheritedAttributeType> seen;
int numnodes;
//InheritedAttributeType pthgraphinherit;
//StaticCFG::CFG* SgCFG;
SgGraphNode* nullnode;
std::map<SgGraphNode*, int> primenode;
bool done;
//std::set<SgGraphNode*> startnodes;
std::set<SgGraphNode*> lstN;
std::map<SgGraphNode*, std::vector<std::set<int> > > lstordmap;
std::set<SgGraphNode*> solvedLoops;
std::map<SgGraphNode*, std::vector<SgGraphNode*> > checkednodes;
std::map<SgGraphNode*, std::set<SgGraphNode*> > downed;
//std::map<SgGraphNode*, int> nodeinedgordmap;
//a value for nodes that have no value, set in the traverse function
InheritedAttributeType nullInherit;
//the user invoked function, runs the algorithm
InheritedAttributeType traverse(SgGraphNode* basenode, SgIncidenceDirectedGraph* g,
InheritedAttributeType inheritedValue, InheritedAttributeType nullInherit,
SgGraphNode* endnode, bool insep = false, bool pcHk = false);
std::set<SgGraphNode*> loopSet;
protected:
//User defined functions to do whatever is needed in evaluation
//All the user gets access to is the node in question
//and the values of the parent nodes (this should be all that is necessary)
virtual InheritedAttributeType evaluateInheritedAttribute(SgGraphNode* n,
std::vector<InheritedAttributeType> inheritedValues) = 0;
//Not used, but may be useful if SynthesizedAttributes become workable in this context
virtual SynthesizedAttributeType evaluateSynthesizedAttribute(SgGraphNode* n,
InheritedAttributeType in,
SynthesizedAttributesList l) = 0;
#if !USE_ROSE
// DQ (11/3/2011): EDG compilains about this (but GNU allowed it, I think that EDG might be correct,
// namely that the value of a reference must be an lvalue (not NULL). But since we are only trying
// to compile ROSE with ROSE (using the new EDG 4.3 front-end as a tests) we can just skip this case for now.
virtual void pathAnalyze(std::vector<SgGraphNode*>& pth, bool loop=false, std::set<std::vector<SgGraphNode*> >& incloops=NULL) = 0;
#else
virtual void pathAnalyze(std::vector<SgGraphNode*>& pth, bool loop, std::set<std::vector<SgGraphNode*> >& incloops) = 0;
#endif
//also not used, but important for possible later use of SynthesizedAttributes
SynthesizedAttributeType defaultSynthesizedAttribute(InheritedAttributeType);
private:
double distime;
//std::set<std::pair<std::pair<SgGraphNode*, SgGraphNode*>, std::pair<SgGraphNode*, SgGraphNode*> > > flpset;
//std::set<std::pair<std::pair<SgGraphNode*, SgGraphNode*>, std::pair<SgGraphNode*, SgGraphNode*> > > goodset;
std::set<SgGraphNode*> ploops;
std::map<SgGraphNode*, std::set<std::vector<SgGraphNode*> > > lpbegins;
std::map<SgGraphNode*, int> frksLeft;
int currm;
int dpMax;
int repEval;
bool pathCheck;
int pathsSize;
//this constructs the graph tree for computation of inheritedValues
std::map<SgGraphNode*, InheritedAttributeType> known;
std::vector<InheritedAttributeType> connectNodes;
std::map<SgGraphNode*, bool> solved;
std::set<SgGraphNode*> solvedset;
//these two are not used, but will be important if SynthesizedAttributes are made reasonable in this context
SynthesizedAttributesList *synthesizedAttributes;
SynthesizedAttributeType traversalResult();
//finally we have two functions necessary for parallel processing if that is chosen to be used by the user
std::map<SgGraphNode*, int> nodeInEdgesNum;
int currprime;
std::vector<SgGraphNode*> endnodefakes;
// Paths and loop sets keyed by "marker" nodes discovered during solvePaths.
std::map<SgGraphNode*, std::vector<std::vector<SgGraphNode*> > > pathsAtMk;
std::set<SgGraphNode*> mkloops;
std::map<SgGraphNode*, std::set<std::vector<SgGraphNode*> > > mkloopmap;
std::map<SgGraphNode*, std::set<std::vector<SgGraphNode*> > > subPathsAtMk;
std::vector<SgGraphNode*> mkglobal;
std::vector<SgGraphNode*> clglobal;
bool inseparable;
// Phase 1: decompose the graph into path pieces between n and endnode.
void solvePaths(SgIncidenceDirectedGraph* g, SgGraphNode* n, SgGraphNode* endnode);
std::vector<std::set<SgGraphNode*> > closuresVec;
// Phase 3: stitch pieces into full paths and hand each to pathAnalyze
// (serial and parallel variants).
void evaluatePaths(SgIncidenceDirectedGraph* g, SgGraphNode* realstartnode, SgGraphNode* endnode);
void evaluatePathsPar(SgIncidenceDirectedGraph* g, SgGraphNode* realstartnode, SgGraphNode* endnode);
bool disjoint(std::vector<SgGraphNode*>& path, std::vector<SgGraphNode*>& vec2) const;
std::set<std::vector<SgGraphNode*> > flatpaths;
// void evalNode(SgIncidenceDirectedGraph* g, SgGraphNode* n);
bool canSolve(SgIncidenceDirectedGraph* g, SgGraphNode* n);
std::map<SgGraphNode*, InheritedAttributeType> inhVals;
std::set<SgDirectedGraphEdge*> seenEdges;
std::set<SgDirectedGraphEdge*> nullEdges;
std::set<SgGraphNode*> clsT;
// Phase 2: order nodes for evaluation, then propagate inherited attributes.
void computeOrder(SgIncidenceDirectedGraph* g, SgGraphNode* n, SgGraphNode* endnode);
void computeInheritedOrdered(SgIncidenceDirectedGraph* g, SgGraphNode* n);
std::pair<bool, SgGraphNode*> getNextPar(SgIncidenceDirectedGraph* g, SgGraphNode* n);
std::pair<bool, SgGraphNode*> getNextChild(SgIncidenceDirectedGraph* g, SgGraphNode* n);
bool computable(SgIncidenceDirectedGraph* g, SgGraphNode* n);
void evalNodeOrdered(SgIncidenceDirectedGraph* g, SgGraphNode* n);
std::map<SgGraphNode*, int> oVals;
bool canEval(SgIncidenceDirectedGraph* g, SgGraphNode* n);
void setPathVal(SgIncidenceDirectedGraph*g, SgGraphNode* n);
// Debug/analysis output helpers (dot-style dumps to a stream).
void printNodePlusEdgesForAnalysis(SgIncidenceDirectedGraph* g, SgGraphNode* n, int loopNum, int pathVal, std::ofstream& ss);
void printNodePlusEdgesForAnalysisPath(SgIncidenceDirectedGraph* g, std::vector<SgGraphNode*> n, int loopNum, int pathVal, std::ofstream& ss);
void printNodeForAnalysis(SgGraphNode* n, int loopNum, int pathNum, std::ofstream& ss);
std::set<SgGraphNode*> completedNodesPath;
std::set<std::pair<SgGraphNode*, SgGraphNode*> > completedEdgesPath;
void printEdgeForAnalysis(SgDirectedGraphEdge* e, bool isNullEdge, std::ofstream& ss);
void printEdgeForAnalysisPath(SgGraphNode* g1, SgGraphNode* g2, std::ofstream& ss);
std::map<int, SgGraphNode*> iVals;
std::set<SgDirectedGraphEdge*> nullEdgesOrderedOut;
std::set<SgDirectedGraphEdge*> completedEdgesOut;
std::set<SgDirectedGraphEdge*> completedEdges;
std::set<SgGraphNode*> compPar;
std::set<SgGraphNode*> compChild;
std::set<SgGraphNode*> computedNodes;
// Start and end nodes of the current traversal, cached by traverse().
SgGraphNode* st;
SgGraphNode* en;
double fllp;
int loopnum;
//std::set<SgGraphNode*> solved;
//InheritedAttributeType findAndReverse(SgGraphNode* n, SgIncidenceDirectedGraph* g);
//evaluateAllInheritedAttribute(std::vector<InheritedAttributeType> endNodeInhVec, SgGraphNode* endnode, std::vector<SgGraphNode*> nodes, std::vector<InheritedAttributeType> inh);
//std::vector<InheritedAttributeType> getZeroInhs(std::vector<std::vector<std::vector<SgGraphNode*> > > qAnsSetSet, std::vector<InheritedAttributeType> endnodeInhVec, SgGraphNode* node);
};
/*
template <class InheritedAttributeType, class SynthesizedAttributeType>
void
GraphBot<InheritedAttributeType, SynthesizedAttributeType>::
travelDown(SgIncidenceDirectedGraph* g) {
std::set<SgDirectedGraphEdge*> oedgs = g->computeEdgeSetOut(iAmHere);
bool taken = false;
if (oedgs.size() > 1) {
std::set<SgDirectedGraphEdge*> edgeTrav = clEdgeTrav[iAmHere];
ROSE_ASSERT(clEdgeTrav.find(iAmHere) != clEdgeTrav.end());
for (std::set<SgDirectedGraphEdge*>::iterator i = oedgs.begin(); i != oedgs.end(); i++) {
if (edgTrav.find(*i) == edgTrav.end() || !taken) {
taken = true;
iAmHere = (*i)->get_to();
lastEdge = *i;
}
}
}
else {
iAmHere = (*(oedgs.begin())->get_to();
}
}
*/
/*
***************************
Various Admin Functions
***************************
*/
/* Default-construct a traversal; allocates the (currently unused)
   synthesized-attribute stack owned by this object. */
template<class InheritedAttributeType, class SynthesizedAttributeType>
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
SgGraphTraversal()
: synthesizedAttributes(new SynthesizedAttributesList())
{
}
#ifndef SWIG
/* Release the synthesized-attribute stack; the pointer is nulled so a
   double-destroy would trip the assertion instead of double-freeing. */
template<class InheritedAttributeType, class SynthesizedAttributeType>
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
~SgGraphTraversal()
{
ROSE_ASSERT(synthesizedAttributes != NULL);
delete synthesizedAttributes;
synthesizedAttributes = NULL;
}
#endif
/* Copy-assign: replace this object's synthesized-attribute stack with a
   deep copy of other's.  Guarded against self-assignment: the previous
   implementation deleted synthesizedAttributes unconditionally and then
   deep-copied from `other`, which is a use-after-free when &other == this. */
template<class InheritedAttributeType, class SynthesizedAttributeType>
const SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType> &
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
operator=(const SgGraphTraversal &other)
{
    if (this != &other)
      {
        ROSE_ASSERT(synthesizedAttributes != NULL);
        delete synthesizedAttributes;
        synthesizedAttributes = other.synthesizedAttributes->deepCopy();
      }
    return *this;
}
/**
This is the function that is used by the user directly to start the algorithm. It is immediately available to the user
SgGraphTraversal::traverse
Input:
@param[n] n starting node
@param[g] SgIncidenceDirectedGraph* g, CFG calculated previously
@param[inheritedValue] InheritedAttributeType inheritedValue, value of the starting node
@param[nullI] InheritedAttributeType nullI, value of the null Attribute, i.e. what to attribute to a node with no value\
@param[endnode] SgGraphNode* endnode, final node
@param[insep] boolean to decide inseparability of the analysis function, not yet in use, set automatically to false
@param[pCh] deprecated, set to false
@return InheritedAttributeType, the value of the attribute at the end node
*/
template<class InheritedAttributeType, class SynthesizedAttributeType>
InheritedAttributeType
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
traverse(SgGraphNode* n, SgIncidenceDirectedGraph* g, InheritedAttributeType inheritedValue, InheritedAttributeType nullI, SgGraphNode* endnode, bool insep, bool pCh) {
    // Entry point of the traversal. It (1) resets all per-traversal member
    // state, (2) runs solvePaths() to decompose the graph into subpaths,
    // (3) runs computeOrder()/computeInheritedOrdered() to propagate inherited
    // attributes node-by-node, and (4) runs evaluatePaths() to hand every
    // complete start->end path to the user's pathAnalyze() callback.
    // Returns the inherited attribute computed at 'endnode'.
    //numnodes = 0;
    //primes.clear();
    // Clear all state left over from any previous traversal on this object.
    looppaths.clear();
    iLoops.clear();
    completedEdgesPath.clear();
    pathValMap.clear();
    loopNumMap.clear();
    nullloops = 0;
    nullEdgesPaths = 0;
    fllp = 0.0;
    mkglobal.clear();
    clglobal.clear();
    lpbegins.clear();
    //currents.clear();
    inhVals.clear();
    iVals.clear();
    oVals.clear();
    //reservedEdges.clear();
    completedEdges.clear();
    completedEdgesOut.clear();
    //completedNodes.clear();
    computedNodes.clear();
    nullEdgesOrdered.clear();
    nullEdgesOrderedOut.clear();
    loopSet.clear();
    pathsAtMk.clear();
    // Remember the traversal endpoints in member state for the helpers.
    st = n;
    en = endnode;
    distime = 0.0;
    int currm = 1;
    int turns = 0;
    pathsSize = 0;
    done = false;
    numnodes = 1;
    std::cout << "starting traversal" << std::endl;
    pathCheck = pCh;
    currprime = 1;
    inseparable = insep;
    // The synthesized-attribute stack must be empty before we start.
    synthesizedAttributes->resetStack();
    ROSE_ASSERT(synthesizedAttributes->debugSize() == 0);
    //SgCFG = cfg;
    // Seed the start node with the caller-supplied inherited value.
    inhVals[n] = inheritedValue;
    //GraphBot<InheritedAttributeType, SynthesizedAttributeType>::inhVals[n] = inheritedValue;
    //primes = generatePrimesSieve();
    // graphnodeinheritedordmap[ncpy] = inheritedValue;
    // nodenodeordmap[ncpy] = n;
    // std::vector<SgGraphNode*> lst;
    // lst.push_back(n);
    // lstordmap[ncpy] = lst;
    nullInherit = nullI;
    InheritedAttributeType inh;
    // t1..t8 bracket the individual phases purely for the timing printouts below.
    struct timeval t1, t2, t3, t4, t5, t6, t7, t8;
    //else {
    loopnum = 0;
    //InheritedAttributeType inh;
    t1 = getCPUTime();
    //this function essentially sets up for the evaluate later, it makes putting together the paths much easier
    solvePaths(g, n, endnode);
    t2 = getCPUTime();
    //making sure that endnode hasn't already been evaluated before the traversal starts, unlikely but just in case
    ROSE_ASSERT(inhVals.find(endnode) == inhVals.end());
    std::cout << "solvePaths done" << std::endl;
    double diff = timeDifference(t2, t1);
    t5 = getCPUTime();
    //InheritedAttributeType pthgraphinherit = botTraverse(g, n, endnode);
    oVals[n] = 0;
    iVals[0] = n;
    pathValMap[n] = 1;
    //inserting n as a computed node
    computedNodes.insert(n);
    //computes the order in which the nodes must be evaluated, makes computeInheritedOrdered much faster
    computeOrder(g, n, endnode);
    std::cout << "order computed" << std::endl;
    //computes the nodal inheritance values
    computeInheritedOrdered(g, n);
    std::cout << "inheritance computed" << std::endl;
    // Both the ordering pass and the inheritance pass must have reached endnode.
    ROSE_ASSERT(oVals.find(endnode) != oVals.end());
    ROSE_ASSERT(inhVals.find(endnode) != inhVals.end());
    //value at the endnode
    InheritedAttributeType pthgraphinherit = inhVals[endnode];
    //= evaluateGraph(g, n, endnode, inheritedValue);
    t6 = getCPUTime();
    std::cout << "evaluateGraph done" << std::endl;
    double diff3 = timeDifference(t6, t5);
    t3 = getCPUTime();
    //actually evaluates every path with a user defined pathAnalyze function
    //for (int i = 0; i < 10; i++) {
    evaluatePaths(g, n, endnode);
    //}
    t4 = getCPUTime();
    t7 = getCPUTime();
    //evaluatePathsPar(g, n, endnode);
    t8 = getCPUTime();
    std::cout << "evaluatePaths done " << std::endl;
    double diff2 = timeDifference(t4, t3);
    double diff2Par = timeDifference(t8, t7);
    // Diagnostic timing summary for the phases above.
    std::cout << "pathsolve took: " << diff << std::endl;
    std::cout << "patheval took: " << diff2 << std::endl;
    std::cout << "parpatheval took: " << diff2Par << std::endl;
    std::cout << "grapheval took: " << diff3 << std::endl;
    std::cout << "entire pathsolve took: " << diff+diff2+diff3+diff2Par << std::endl;
    std::cout << "potential loops: " << nullEdgesOrdered.size() << std::endl;
    std::cout << "nullNum: " << nullNum << std::endl;
    //std::cout << "goodsets: " << goodset.size() << std::endl;
    //std::cout << "flpsets: " << flpset.size() << std::endl;
    std::cout << "mkloops: " << mkloops.size() << std::endl;
    std::cout << "distime: " << distime << std::endl;
    std::cout << "fllp: " << fllp << std::endl;
    return pthgraphinherit;
    //}
    //std::cout << "number of endnodefakes: " << endnodefakes.size() << std::endl;
    //std::cout << "should be number of nodes: " << currprime << std::endl;
    //if (pathanalysis == true) {
    //    analyzepaths(endnode, g);
    //}
    //return inh;
    //Currently this is not very useful, but it does nothing if traversalResult is not set.
}
/* WARNING:
   This is not a general is_disjoint. It was written for path assembly,
   where the last element of a path and the first element of the addend
   are expected to coincide.
   NOTE(review): the implementation below does not actually skip the first
   element of the second set, contrary to what this comment originally
   claimed -- verify which behavior is intended.
*/
bool is_disjoint(std::set<SgGraphNode*> set1, std::set<SgGraphNode*> set2) {
if (set1.empty() || set2.empty()) {
return true;
}
std::set<SgGraphNode*>::iterator it1 = set1.begin();
std::set<SgGraphNode*>::iterator it2 = set2.begin();
std::set<SgGraphNode*>::iterator it1End = set1.end();
std::set<SgGraphNode*>::iterator it2End = set2.end();
if (*it1 > *set2.rbegin() || *it2 > *set1.rbegin()) {
return true;
}
while (it1 != it1End && it2 != it2End) {
if (*it1 == *it2) {
return false;
}
if (*it1 < *it2) {
it1++;
}
else {
it2++;
}
}
return true;
}
//Checks for disjoint, necessary in computing the paths
template<class InheritedAttributeType, class SynthesizedAttributeType>
bool
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
disjoint(std::vector<SgGraphNode*>& pthloops, std::vector<SgGraphNode*>& vec2) const {
    // Returns false as soon as any node collected in 'pthloops' also occurs
    // in 'vec2'; returns true when the two sequences share no node.
    std::vector<SgGraphNode*>::const_iterator loopIt = pthloops.begin();
    for (; loopIt != pthloops.end(); ++loopIt) {
        bool present = (find(vec2.begin(), vec2.end(), *loopIt) != vec2.end());
        if (present) {
            return false;
        }
    }
    return true;
}
/*
if (pbool) {
time(&t2);
double diff = difftime(t2, t1);
distime += diff;
return true;
}
for (unsigned int k = 0; k < path.size(); k++) {
s1.clear();
*/
/*
pbool = true;
for (int p = 0; p < path[k].size(); p++) {
if (ploops.find(path[k][p]) != ploops.end()) {
pbool = false;
}
}
// copy(path[k].begin(), path[k].end(), inserter(s1, s1.end()));
if (!pbool) {
*/
/*
std::pair<std::pair<SgGraphNode*, SgGraphNode*>, std::pair<SgGraphNode*, SgGraphNode*> > flp;
flp.second.first = vec2[0];
flp.second.first = vec2[1];
flp.first.first = path[k][0];
flp.first.second = path[k][1];
if (vec2.front() == vec2.back()) {
time(&t2);
double diff = difftime(t2, t1);
distime += diff;
return false;
}
if (flpset.find(flp) != flpset.end()) {
//std::cout << "already seen" << std::endl;
time(&t2);
double diff = difftime(t2, t1);
distime += diff;
return false;
}
*/
/*
else if (goodset.find(flp) != goodset.end()) {
goodsetbool = true;
}
*/
/*
if (is_disjoint(s1,s2)) {
//goodset.insert(flp);
continue;
}
else {
return false;
}
*/
/*
else {
std::vector<SgGraphNode*> vec1 = path[k];
//for (unsigned int i = 0; i < vec1.size(); i++) {
for (unsigned int j = 0; j < mkloopvec.size(); j++) {
std::vector<SgGraphNode*>::iterator q = find(vec1.begin(), vec1.end(), mkloopvec[j]);
if (q != vec1.end()) {
if (*q != vec1[vec1.size() - 1] || j != 0) {
flpset.insert(flp);
// std::cout << "not disjoint" << std::endl;
time(&t2);
double diff = difftime(t2, t1);
distime += diff;
return false;
}
}
}
//}
//goodset.insert(flp);
}
}
//}
*/
/*
for (unsigned int p = 0; p < vec2.size(); p++) {
for (unsigned int q = 0; q < vec2.size(); q++) {
if (p != q) {
if (vec2[p] == vec2[q]) {
return false;
}
}
}
}
*/
/*
time(&t2);
double diff = difftime(t2, t1);
distime += diff;
return true;
}
*/
//checks for solvability of a node in nodal analysis
// Decides whether node n is currently solvable during nodal analysis.
// A node is solvable when its inherited value is already known, or when
// every incoming edge either originates from a node whose inherited value
// has been computed or is a known null (loop-closing) edge.
// Fixes over the original: removed the unused local 'bool loop', and
// renamed the edge set from the misleading 'oed' (it holds IN-edges).
template<class InheritedAttributeType, class SynthesizedAttributeType>
bool
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
canSolve(SgIncidenceDirectedGraph* g, SgGraphNode* n) {
    // Already computed: trivially solvable.
    if (inhVals.find(n) != inhVals.end()) {
        return true;
    }
    std::set<SgDirectedGraphEdge*> inEdges = g->computeEdgeSetIn(n);
    // No predecessors and no known value: nothing to solve from.
    if (inEdges.size() == 0) {
        return false;
    }
    // Every predecessor must be computed unless the edge is a null edge.
    for (std::set<SgDirectedGraphEdge*>::iterator i = inEdges.begin(); i != inEdges.end(); i++) {
        if (inhVals.find((*i)->get_from()) == inhVals.end() && nullEdges.find(*i) == nullEdges.end()) {
            return false;
        }
    }
    return true;
}
//this function evaluates values of paths via the user-defined pathAnalyze function
template<class InheritedAttributeType, class SynthesizedAttributeType>
void
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
evaluatePathsPar(SgIncidenceDirectedGraph* g, SgGraphNode* realstartnode, SgGraphNode* endnode) {
    // Parallel (OpenMP) variant of evaluatePaths(): enumerates every
    // start->end path by assembling the subpaths recorded in pathsAtMk,
    // using a worklist of 'Bot' workers. Each Bot carries a partial path;
    // when a Bot reaches 'endnode' its flattened path is collected and later
    // handed to the user's pathAnalyze() callback in batches.
    // NOTE(review): currently not called from traverse() (the call site there
    // is commented out) -- verify before relying on it.
    std::vector<std::vector<SgGraphNode*> > path;
    std::vector<SgGraphNode*> spath;
    SgGraphNode* n = realstartnode;
    int successes = 0;
    int failures = 0;
    int j = 0;
    std::vector<SgGraphNode*> currpthorg;
    int currint = 0;
    std::map<SgGraphNode*, int> intPath;
    intPath[n] = currint;
    currint++;
    std::map<SgGraphNode*, int> currents;
    SgGraphNode* currnode;
    bool step = false;
    bool midstep = false;
    //note: pathsAtMk is referring to subpaths connected to that marker, a marker is a split in the graph (usually an if statement)
    std::vector<std::vector<SgGraphNode*> > pth = pathsAtMk[realstartnode];
    std::vector<std::vector<SgGraphNode*> > cpth = pathsAtMk[realstartnode];
    path.clear();
    int disjoints = 0;
    int disjointtrues = 0;
    currpthorg = pth[0];
    intPath[pth[0].front()] = currint;
    std::set<SgGraphNode*> pthloopstmp;
    // NOTE(review): 'fakenode' is inserted while uninitialized -- it acts only
    // as a placeholder entry so pthloops is non-empty, but reading an
    // uninitialized pointer value is undefined behavior; verify intent.
    SgGraphNode* fakenode;
    pthloopstmp.insert(fakenode);
    std::vector<std::set<SgGraphNode*> > pthloops;
    pthloops.push_back(pthloopstmp);
    pthloopstmp.clear();
    currint++;
    int stepnum = 0;
    std::vector<SgGraphNode*> rs;
    rs.push_back(realstartnode);
    path.push_back(rs);
    currents.clear();
    step = false;
    std::vector<SgGraphNode*> sub;
    std::set<std::vector<SgGraphNode*> > nullIncLoops;
    // Worklist state: 'botlst' is the active worker set, 'todobotlst' holds
    // workers parked when the active set exceeds MAXBOTS.
    std::vector<struct Bot*> todobotlst;
    std::vector<struct Bot*> botlst;
    struct Bot* rootBot = new Bot;
    rootBot->remove = false;
    rootBot->path = path;
    rootBot->currpth = currpthorg;
    rootBot->pthloops = pthloops;
    rootBot->on = true;
    botlst.push_back(rootBot);
    int tip = 1;
    int ti = 1;
    // Completed (flattened path, loop-node sets) pairs awaiting pathAnalyze().
    std::vector<std::pair<std::vector<SgGraphNode*>, std::vector<std::set<SgGraphNode*> > > > collectedPaths;
    int maxlst = 0;
    while (true) {
        // Diagnostic: track the high-water mark of outstanding workers.
        if (todobotlst.size()+botlst.size() > maxlst) {
            maxlst = todobotlst.size()+botlst.size();
            std::cout << "maxlst: " << maxlst << std::endl;
            std::cout << "todobotlst.size(): " << todobotlst.size() << std::endl;
            std::cout << "botlst.size(): " << botlst.size() << std::endl;
        }
        // Tuning knobs: cap on active workers, batch size for pathAnalyze,
        // and (unused here) per-iteration local limits.
        int MAXBOTS = 10000;
        int MINPATHS = 1000;
        int LOCALMAXBOTS = 10;
        int LOCALMAXNODES = 0;
        std::vector<struct Bot*> lstnullbot;
        std::vector<std::vector<struct Bot*> > newbotlsts (MAXBOTS, lstnullbot);
        //std::vector<struct Bot*> newbotlsts (MAXBOTS, lstnullbot);
        //tip = ti;
        //ti = 0;
        ROSE_ASSERT(botlst.size() >= 0);
        if (botlst.size() == 0) {
            // No active workers: refill from the parked list, or flush any
            // remaining collected paths and finish.
            if (todobotlst.size() != 0) {
                while (todobotlst.size() > 0 && botlst.size() < MAXBOTS) {
                    todobotlst.back()->on = true;
                    botlst.push_back(todobotlst.back());
                    todobotlst.pop_back();
                }
            }
            else {
                if (collectedPaths.size() > 0) {
                    for (int i = 0; i < collectedPaths.size(); i++) {
                        std::set<std::vector<SgGraphNode*> > incloops;
                        std::vector<std::set<SgGraphNode*> > pthloops = collectedPaths[i].second;
                        for (int q = 0; q < pthloops.size(); q++) {
                            for (std::set<SgGraphNode*>::iterator p = pthloops[q].begin(); p != pthloops[q].end(); p++) {
                                for (std::set<std::vector<SgGraphNode*> >::iterator o = mkloopmap[*p].begin(); o != mkloopmap[*p].end(); o++) {
                                    incloops.insert(*o);
                                }
                            }
                        }
                        pathAnalyze(collectedPaths[i].first, false, incloops);
                    }
                    collectedPaths.clear();
                }
                break;
            }
        }
        if (botlst.size() > 0) {
            std::pair<std::vector<SgGraphNode*>, std::vector<std::set<SgGraphNode*> > > nullpr;
            std::vector<std::pair<std::vector<SgGraphNode*>, std::vector<std::set<SgGraphNode*> > > > newpathslst (MAXBOTS, nullpr);
// Each iteration advances one Bot by one subpath; results are written to
// per-index slots (newpathslst/newbotlsts) to avoid data races.
#pragma omp parallel for
            for (int i = 0; i < botlst.size(); i++) {
                //std::map<SgGraphNode*, std::set<std::vector<SgGraphNode*> > > mkloopmaptmp = mkloopmap;
                std::vector<struct Bot*> localbotlst;
                std::pair<std::vector<SgGraphNode*>, std::vector<std::set<SgGraphNode*> > > localnewpath;
                struct Bot* currBot = botlst[i];
                if (currBot->on) {
                    std::vector<SgGraphNode*> currpth = currBot->currpth;
                    std::vector<std::vector<SgGraphNode*> > path = currBot->path;
                    std::vector<std::set<SgGraphNode*> > pthloops = currBot->pthloops;
                    if (currpth.back() == endnode) {
                        // This Bot completed a path: flatten the subpath list
                        // and queue the result for batched analysis.
                        path.push_back(currpth);
                        std::vector<SgGraphNode*> flatpath;
                        std::set<std::vector<SgGraphNode*> > incloops;
                        struct timeval q1, q2;
                        ROSE_ASSERT(path.size() == pthloops.size() + 1);
                        q1 = getCPUTime();
                        for (unsigned int q = 0; q < pthloops.size(); q++) {
                            for (unsigned int r = 0; r < path[q].size(); r++) {
                                flatpath.push_back(path[q][r]);
                            }
                            /*
                            #pragma omp critical
                            {
                            for (std::set<SgGraphNode*>::iterator p = pthloops[q].begin(); p != pthloops[q].end(); p++) {
                                for (std::set<std::vector<SgGraphNode*> >::iterator o = mkloopmap[*p].begin(); o != mkloopmap[*p].end(); o++) {
                                    incloops.insert(*o);
                                }
                            }
                            }
                            */
                        }
                        for (unsigned int pt2 = 0; pt2 < path[path.size()-1].size(); pt2++) {
                            flatpath.push_back(path[path.size()-1][pt2]);
                        }
                        q2 = getCPUTime();
                        fllp += timeDifference(q2,q1);
                        flatpath.push_back(endnode);
                        //user defined function, run on the final path, gives the user loops that are included via "incloops" a set of vectors that contain the individual loops
                        /*
                        #pragma omp critical (analyze)
                        {
                        pathAnalyze(flatpath, false, incloops);
                        }
                        */
                        std::pair<std::vector<SgGraphNode*> , std::vector<std::set<SgGraphNode*> > > newcol;
                        newcol.first = flatpath;
                        newcol.second = pthloops;
                        localnewpath = newcol;
                        incloops.clear();
                        int pts = pathsSize++;
                        pathsSize += 1;
                        flatpath.clear();
                        path.pop_back();
                        int rounds = 0;
                        bool starter = false;
                        // This gets a bit complicated so here is an overview:
                        // This is running down the graph and finding the endnode. Once it finds the endnode it goes back up to the last unevaluated subpath. It does this quickly with an integer that counts how many times that node has been used for a path. If this ends up being the number of outnodes, we don't need that node anymore, so we clear it to zero, then continue up the graph. We HAVE to reset because every time a new pathway is chosen above that node, it needs to have the ability to traverse that node.
                        /*
                        if (currBot->nodelst.size() != 0) {
                            while (path.back().back() != currBot->nodelst.back().first) {
                                ROSE_ASSERT(path.size() != 0);
                                path.pop_back();
                                pthloops.pop_back();
                            }
                            currBot->path = path;
                            currBot->pthloops = pthloops;
                            currBot->currpth = pathsAtMk[(path.back()).back()][currBot->nodelst.back().second];
                            currBot->nodelst.pop_back();
                            localbotlst.push_back(currBot);
                        }
                        else {
                        */
                        // Path delivered: mark this Bot for deletion.
                        currBot->remove = true;
                        localbotlst.push_back(currBot);
                        //}
                    }
                    else {
                        //this checks first to see if we have any loops in our path. If not it continues down, if there is it goes back to the last nonloop node
                        bool disj = true;
                        struct timeval tdisb, tdise;
                        //tdisb = getCPUTime();
                        for (int x = 0; x < pthloops.size(); x++) {
                            for (std::set<SgGraphNode*>::iterator j = pthloops[x].begin(); j != pthloops[x].end(); j++) {
                                if (find(currpth.begin(), currpth.end(), *j) != currpth.end()) {
                                    disj = false;
                                }
                            }
                        }
                        //tdise = getCPUTime();
                        //distime += timeDifference(tdise, tdisb);
                        if (disj) {
                            // Extend the Bot's path with this subpath and fork
                            // a new Bot for every alternative continuation.
                            disjointtrues++;
                            //std::cout << "disjoints: " << disjointtrues << std::endl;
                            midstep = false;
                            std::set<SgGraphNode*> pthloopstmp;
                            pthloopstmp.clear();
                            for (int i = 0; i < currpth.size(); i++) {
                                //currflat.push_back(currpth[i]);
                                if (mkloops.find(currpth[i]) != mkloops.end()) {
                                    pthloopstmp.insert(currpth[i]);
                                }
                            }
                            pthloops.push_back(pthloopstmp);
                            path.push_back(currpth);
                            pthloopstmp.clear();
                            //std::set<std::vector<SgGraphNode*> > lpth;
                            std::vector<SgGraphNode*> oldcurrpth = currpth;
                            currpth.clear();
                            SgGraphNode* frontnode = (path.back()).front();
                            SgGraphNode* backnode = (path.back()).back();
                            ROSE_ASSERT(pathsAtMk.find(backnode) != pathsAtMk.end() || backnode == endnode);
                            ROSE_ASSERT(pathsAtMk.find(frontnode) != pathsAtMk.end());
                            std::vector<std::vector<SgGraphNode*> > tmppths = pathsAtMk[backnode];
                            currBot->currpth = tmppths[0];
                            currBot->path = path;
                            currBot->pthloops = pthloops;
                            //newbotlst.push_back(currBot);
                            for (int tp = 1; tp < tmppths.size(); tp++) {
                                //if (localbotlst.size() < LOCALMAXBOTS) {
                                /*
                                if (currBot->nodelst.size() < LOCALMAXNODES) {
                                    std::pair<SgGraphNode*, int> cur;
                                    cur.second = tp;
                                    cur.first = path.back().back();
                                    currBot->nodelst.push_back(cur);
                                }
                                else {
                                */
                                struct Bot* newBot = new Bot;
                                newBot->remove = false;
                                newBot->currpth = tmppths[tp];
                                newBot->path = path;
                                newBot->pthloops = pthloops;
                                localbotlst.push_back(newBot);
                                //ti++;
                                // }
                            }
                            localbotlst.push_back(currBot);
                            //ti++;
                        }
                        else {
                            /*
                            if (currBot->nodelst.size() != 0) {
                                while (path.back().back() != currBot->nodelst.back().first) {
                                    ROSE_ASSERT(path.size() != 0);
                                    path.pop_back();
                                    pthloops.pop_back();
                                }
                                currBot->path = path;
                                currBot->pthloops = pthloops;
                                currBot->currpth = pathsAtMk[(path.back()).back()][currBot->nodelst.back().second];
                                currBot->nodelst.pop_back();
                                localbotlst.push_back(currBot);
                                //ti++;
                            }
                            else {
                            */
                            // Loop detected on this continuation: retire the Bot.
                            currBot->remove = true;
                            localbotlst.push_back(currBot);
                            //delete currBot;
                            // }
                        }
                    }
                    newpathslst[i] = localnewpath;
                    newbotlsts[i] = localbotlst;
                }
            }
            // Sequential merge of the per-worker results: delete retired Bots,
            // keep up to MAXBOTS active, park the rest.
            botlst.clear();
            int num = 0;
            for (int i = 0; i < newbotlsts.size(); i++) {
                if (newpathslst[i].first.size() > 0) {
                    collectedPaths.push_back(newpathslst[i]);
                }
                for (int j = 0; j < newbotlsts[i].size(); j++) {
                    if (newbotlsts[i][j]->remove == true) {
                        delete newbotlsts[i][j];
                    }
                    else if (num < MAXBOTS) {
                        newbotlsts[i][j]->on = true;
                        botlst.push_back(newbotlsts[i][j]);
                        num++;
                    }
                    else {
                        newbotlsts[i][j]->on = false;
                        todobotlst.push_back(newbotlsts[i][j]);
                    }
                }
            }
            // Flush a batch of completed paths to the user callback.
            if (collectedPaths.size() > MINPATHS) {
                for (int i = 0; i < collectedPaths.size(); i++) {
                    std::vector<std::set<SgGraphNode*> > pthloops;
                    std::set<std::vector<SgGraphNode*> > incloops;
                    pthloops = collectedPaths[i].second;
                    for (int q = 0; q < pthloops.size(); q++) {
                        for (std::set<SgGraphNode*>::iterator p = pthloops[q].begin(); p != pthloops[q].end(); p++) {
                            for (std::set<std::vector<SgGraphNode*> >::iterator o = mkloopmap[*p].begin(); o != mkloopmap[*p].end(); o++) {
                                incloops.insert(*o);
                            }
                        }
                    }
                    pathAnalyze(collectedPaths[i].first, false, incloops);
                }
                collectedPaths.clear();
            }
        }
        else {
            // Final drain: no active workers remained after the refill check.
            if (collectedPaths.size() > 0) {
                for (int i = 0; i < collectedPaths.size(); i++) {
                    std::set<std::vector<SgGraphNode*> > incloops;
                    pthloops = collectedPaths[i].second;
                    for (int q = 0; q < pthloops.size(); q++) {
                        for (std::set<SgGraphNode*>::iterator p = pthloops[q].begin(); p != pthloops[q].end(); p++) {
                            for (std::set<std::vector<SgGraphNode*> >::iterator o = mkloopmap[*p].begin(); o != mkloopmap[*p].end(); o++) {
                                incloops.insert(*o);
                            }
                        }
                    }
                    pathAnalyze(collectedPaths[i].first, false, incloops);
                }
            }
            collectedPaths.clear();
            break;
        }
    }
    std::cout << "successes: " << successes << std::endl;
    std::cout << "failures: " << failures << std::endl;
    std::cout << "maxlst: " << maxlst << std::endl;
    return;
}
template<class InheritedAttributeType, class SynthesizedAttributeType>
void
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
evaluatePaths(SgIncidenceDirectedGraph* g, SgGraphNode* realstartnode, SgGraphNode* endnode) {
    // Sequential path enumeration: walks the subpath table pathsAtMk (built
    // by solvePaths) with explicit backtracking, assembling every complete
    // realstartnode->endnode path and handing each flattened path, together
    // with the loops it touches, to the user's pathAnalyze() callback.
    //std::set<SgGraphNode*> seen;
    //for (std::map<SgGraphNode*, std::vector<std::vector<SgGraphNode*> > >::iterator i = pathsAtMk.begin(); i != pathsAtMk.end(); i++) {
    /*
    std::vector<std::vector<SgGraphNode*> > tocheck = (*i).second;
    for (int j = 0; j < tocheck.size(); j++) {
        for (int k = 0; k < tocheck[j].size(); k++) {
            if (seen.find(tocheck[j][k]) != seen.end()) {
                ploops.insert(tocheck[j][k]);
            }
            else {
                seen.insert(tocheck[j][k]);
            }
        }
    }
    }
    */
    // 'path' is the current stack of subpaths; 'currpth' the subpath being
    // considered; 'currents' counts, per marker node, how many of its
    // alternative subpaths have been consumed (drives backtracking).
    std::vector<std::vector<SgGraphNode*> > path;
    std::vector<SgGraphNode*> spath;
    SgGraphNode* n = realstartnode;
    int successes = 0;
    int failures = 0;
    int j = 0;
    std::vector<SgGraphNode*> currpth;
    int currint = 0;
    std::map<SgGraphNode*, int> intPath;
    intPath[n] = currint;
    currint++;
    std::map<SgGraphNode*, int> currents;
    SgGraphNode* currnode;
    bool step = false;
    bool midstep = false;
    //note: pathsAtMk is referring to subpaths connected to that marker, a marker is a split in the graph (usually an if statement)
    std::vector<std::vector<SgGraphNode*> > pth = pathsAtMk[realstartnode];
    std::vector<std::vector<SgGraphNode*> > cpth = pathsAtMk[realstartnode];
    path.clear();
    int disjoints = 0;
    int disjointtrues = 0;
    currpth = pth[0];
    intPath[pth[0].front()] = currint;
    std::set<SgGraphNode*> pthloopstmp;
    // NOTE(review): 'fakenode' is inserted while uninitialized -- it serves
    // only as a placeholder so pthloops stays aligned with path, but reading
    // an uninitialized pointer value is undefined behavior; verify intent.
    SgGraphNode* fakenode;
    pthloopstmp.insert(fakenode);
    std::vector<std::set<SgGraphNode*> > pthloops;
    pthloops.push_back(pthloopstmp);
    pthloopstmp.clear();
    currint++;
    int stepnum = 0;
    std::vector<SgGraphNode*> rs;
    rs.push_back(realstartnode);
    path.push_back(rs);
    //currflat.push_back(realstartnode);
    currents.clear();
    step = false;
    //std::vector<SgGraphNode*> currflat;
    std::vector<SgGraphNode*> sub;
    /*
    std::ofstream mz;
    mz.open("pathanalysis.dot");
    mz << "digraph defaultName { \n";
    */
    std::set<std::vector<SgGraphNode*> > nullIncLoops;
    /*
    for (unsigned int p = 0; p < looppaths.size(); p++) {
        std::vector<SgGraphNode*> lp = looppaths[p];
        for (unsigned int i = 0; i < lp.size()-1; i++) {
            for (unsigned int l = i+1; l < lp.size(); l++) {
                if (lp[i] == lp[l] && lp[i] != realstartnode && lp[i] != endnode) {
                    std::vector<SgGraphNode*> interiorloop;
                    interiorloop.clear();
                    for (unsigned int j = i; j < l+1; j++) {
                        interiorloop.push_back(lp[j]);
                    }
                    if (interiorloop.size() > 2) {
                    }
                    if (interiorloop.size() > 2 && interiorloop.back() != endnode) {
                        if (find(iLoops.begin(), iLoops.end(), interiorloop) == iLoops.end()) {
                            if (find(looppaths.begin(), looppaths.end(), interiorloop) == looppaths.end()) {
                                iLoops.push_back(interiorloop);
                                loopnum++;
                                for (unsigned int k = 0; k < interiorloop.size(); k++) {
                                    loopNumMap[interiorloop[k]] = loopnum;
                                }
                                lpbegins[interiorloop.front()].insert(interiorloop);
                                pathAnalyze(interiorloop, true, nullIncLoops);
                            }
                        }
                    }
                }
            }
        }
        if (lp.size() > 2) {
            lpbegins[lp.front()].insert(lp);
            pathAnalyze(lp, true, nullIncLoops);
            //for (unsigned int i = 1; i < lp.size(); i++) {
            //    printNodePlusEdgesForAnalysisPath(g, lp, p, p, mz);
            //}
        }
    }
    */
    // Main enumeration loop: runs until backtracking climbs past the start node.
    while (step == false) {
        stepnum++;
        if (currpth.back() == endnode) {
            // A complete path: flatten the subpath stack, gather the loops it
            // touches, deliver it to the user, then backtrack.
            path.push_back(currpth);
            //for (int i = 0; i < currpth.size(); i++) {
            //    currflat.push_back(currpth[i]);
            //}
            std::vector<SgGraphNode*> flatpath;
            //std::vector<SgGraphNode*> sub;
            std::set<std::vector<SgGraphNode*> > incloops;
            struct timeval q1, q2;
            //std::cout << "path.size(): " << path.size() << std::endl;
            //std::cout << "pthloops.size(): " << pthloops.size() << std::endl;
            ROSE_ASSERT(path.size() == pthloops.size() + 1);
            q1 = getCPUTime();
            for (unsigned int q = 0; q < pthloops.size(); q++) {
                //sub = path[q];
                //sub.pop_back();
                for (unsigned int r = 0; r < path[q].size(); r++) {
                    flatpath.push_back(path[q][r]);
                }
                for (std::set<SgGraphNode*>::iterator p = pthloops[q].begin(); p != pthloops[q].end(); p++) {
                    for (std::set<std::vector<SgGraphNode*> >::iterator o = mkloopmap[*p].begin(); o != mkloopmap[*p].end(); o++) {
                        incloops.insert(*o);
                    }
                }
            }
            for (unsigned int pt2 = 0; pt2 < path[path.size()-1].size(); pt2++) {
                flatpath.push_back(path[path.size()-1][pt2]);
            }
            q2 = getCPUTime();
            fllp += timeDifference(q2,q1);
            flatpath.push_back(endnode);
            /*
            for (unsigned int ps = 0; ps < flatpath.size(); ps++) {
                if (lpbegins.find(flatpath[ps]) != lpbegins.end()) {
                    for (std::set<std::vector<SgGraphNode*> >::iterator sv = lpbegins[flatpath[ps]].begin(); sv != lpbegins[flatpath[ps]].end(); sv++) {
                        incloops.insert(*sv);
                    }
                }
            }
            */
            //user defined function, run on the final path, gives the user loops that are included via "incloops" a set of vectors that contain the individual loops
            pathAnalyze(flatpath, false, incloops);
            incloops.clear();
            //printNodePlusEdgesForAnalysisPath(g, flatpath, -1, -1, mz);
            int pts = pathsSize++;
            pathsSize += 1;
            flatpath.clear();
            path.pop_back();
            int rounds = 0;
            bool starter = false;
            // This gets a bit complicated so here is an overview:
            // This is running down the graph and finding the endnode. Once it finds the endnode it goes back up to the last unevaluated subpath. It does this quickly with an integer that counts how many times that node has been used for a path. If this ends up being the number of outnodes, we don't need that node anymore, so we clear it to zero, then continue up the graph. We HAVE to reset because every time a new pathway is chosen above that node, it needs to have the ability to traverse that node.
            while (true) {
                rounds++;
                ROSE_ASSERT(pathsAtMk.find((path.back()).back()) != pathsAtMk.end());
                if ((path.back()).front() == realstartnode) {
                    starter = true;
                }
                if (currents[(path.back()).back()] < (pathsAtMk[(path.back()).back()].size()) /*|| (path.back()).front() == realstartnode*/) {
                    std::vector<std::vector<SgGraphNode*> > cpths = pathsAtMk[(path.back()).back()];
                    currpth = cpths[currents[(path.back()).back()]];
                    currents[(path.back()).back()]++;
                    break;
                }
                else {
                    currents[(path.back()).back()] = 0;
                    path.pop_back();
                    pthloops.pop_back();
                }
                if (starter == true) {
                    step = true;
                    break;
                }
            }
        }
        else {
            //this checks first to see if we have any loops in our path. If not it continues down, if there is it goes back to the last nonloop node
            bool disj = true;
            struct timeval tdisb, tdise;
            tdisb = getCPUTime();
            for (int i = 0; i < pthloops.size(); i++) {
                for (std::set<SgGraphNode*>::iterator j = pthloops[i].begin(); j != pthloops[i].end(); j++) {
                    if (find(currpth.begin(), currpth.end(), *j) != currpth.end()) {
                        disj = false;
                    }
                }
            }
            /*
            #pragma omp parallel for num_threads(4) private(i,j)
            for (i = 0; i < pthloops.size(); i++) {
                if (disj) {
                    for (std::set<SgGraphNode*>::iterator j = pthloops[i].begin(); j != pthloops[i].end(); j++) {
                        if (find(currpth.begin(), currpth.end(), *j) != currpth.end()) {
                            disj = false;
                            //j = pthloops[i].size();
                        }
                    }
                }
            }
            */
            tdise = getCPUTime();
            distime += timeDifference(tdise, tdisb);
            if (disj) {
                // No loop collision: push this subpath and descend into the
                // first untried continuation at its end marker.
                disjointtrues++;
                //std::cout << "disjoints: " << disjointtrues << std::endl;
                midstep = false;
                std::set<SgGraphNode*> pthloopstmp;
                pthloopstmp.clear();
                for (int i = 0; i < currpth.size(); i++) {
                    //currflat.push_back(currpth[i]);
                    if (mkloops.find(currpth[i]) != mkloops.end()) {
                        pthloopstmp.insert(currpth[i]);
                    }
                }
                pthloops.push_back(pthloopstmp);
                path.push_back(currpth);
                pthloopstmp.clear();
                //std::set<std::vector<SgGraphNode*> > lpth;
                std::vector<SgGraphNode*> oldcurrpth = currpth;
                currpth.clear();
                if (currents.find((path.back()).back()) == currents.end()) {
                    currents[(path.back()).back()] = 0;
                }
                SgGraphNode* frontnode = (path.back()).front();
                SgGraphNode* backnode = (path.back()).back();
                ROSE_ASSERT(pathsAtMk.find(backnode) != pathsAtMk.end() || backnode == endnode);
                ROSE_ASSERT(pathsAtMk.find(frontnode) != pathsAtMk.end());
                if (currents.find(backnode) == currents.end()) {
                    currents[backnode] = 0;
                }
                else {
                    ROSE_ASSERT(currents[backnode] == 0);
                }
                std::vector<std::vector<SgGraphNode*> > tmppths = pathsAtMk[backnode];
                currpth = tmppths[currents[backnode]];
                ROSE_ASSERT(currpth != oldcurrpth);
                currents[backnode]++;
            }
            else {
                // Loop collision: backtrack to the nearest marker that still
                // has an untried continuation (or give up at the start node).
                disjoints++;
                //std::cout << "disjoint false: " << s << std::endl;
                while (true) {
                    if (currents[(path.back()).back()] < pathsAtMk[(path.back()).back()].size() || path.back().back() == realstartnode) {
                        break;
                    }
                    currents[(path.back()).back()] = 0;
                    path.pop_back();
                    pthloops.pop_back();
                }
                if ((path.back()).back() != realstartnode) {
                    currpth = (pathsAtMk[(path.back()).back()])[currents[(path.back()).back()]];
                    currents[(path.back()).back()]++;
                }
                else {
                    step = true;
                }
            }
        }
    }
    std::cout << "successes: " << successes << std::endl;
    std::cout << "failures: " << failures << std::endl;
    return;
}
//these are debugging functions, used to visually ascertain where the paths are going to check to make sure everything is evaluated
/* DEBUGGING */
template<class InheritedAttributeType, class SynthesizedAttributeType>
void
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
printNodePlusEdgesForAnalysis(SgIncidenceDirectedGraph* g, SgGraphNode* n, int loopNum, int pathVal, std::ofstream& ss) {
    // Emit the node itself, then one DOT line per outgoing edge; edges found
    // in nullEdgesOrdered are rendered as null (dotted) edges.
    printNodeForAnalysis(n, loopNum, pathVal, ss);
    std::set<SgDirectedGraphEdge*> outEdges = g->computeEdgeSetOut(n);
    std::set<SgDirectedGraphEdge*>::iterator edgeIt = outEdges.begin();
    for (; edgeIt != outEdges.end(); ++edgeIt) {
        bool isNull = (nullEdgesOrdered.find(*edgeIt) != nullEdgesOrdered.end());
        printEdgeForAnalysis(*edgeIt, isNull, ss);
    }
}
// Emits a whole path to the DOT stream: each not-yet-printed node, plus an
// edge n[i] -> n[i+1] for each consecutive pair not already printed.
// completedNodesPath/completedEdgesPath are used to deduplicate output
// across calls. Fix: guard against an empty path -- n.size() is unsigned,
// so n.size()-1 underflows when n is empty and both the loop bound and the
// trailing n[n.size()-1] access would read far out of bounds.
template<class InheritedAttributeType, class SynthesizedAttributeType>
void
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
printNodePlusEdgesForAnalysisPath(SgIncidenceDirectedGraph* g, std::vector<SgGraphNode*> n, int loopNum, int pathVal, std::ofstream& ss) {
    // Empty path: nothing to print (prevents unsigned underflow below).
    if (n.empty()) {
        return;
    }
    for (unsigned int i = 0; i < n.size()-1; i++) {
        if (completedNodesPath.find(n[i]) == completedNodesPath.end()) {
            printNodeForAnalysis(n[i], loopNum, pathVal, ss);
            completedNodesPath.insert(n[i]);
        }
        // Record the (to, from) pair so each edge is printed at most once.
        std::pair<SgGraphNode*, SgGraphNode*> prnod;
        prnod.first = n[i+1];
        prnod.second = n[i];
        if (completedEdgesPath.find(prnod) == completedEdgesPath.end()) {
            printEdgeForAnalysisPath(n[i+1], n[i], ss);
            completedEdgesPath.insert(prnod);
        }
    }
    // The loop above stops one short; print the final node if still unseen.
    if (completedNodesPath.find(n[n.size() - 1]) == completedNodesPath.end()) {
        printNodeForAnalysis(n[n.size()-1], loopNum, pathVal, ss);
        completedNodesPath.insert(n[n.size() - 1]);
    }
}
// Emits one DOT node line: loop nodes (loopNum != 0) are labeled
// "LoopNumS<loopNum>" in solid green; path nodes are labeled
// "pathNumS<pathNum>" in dotted black.
// Fix: removed the unused local 'nodeColor' (the colors are written
// literally in the stream statements below).
template<class InheritedAttributeType, class SynthesizedAttributeType>
void
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
printNodeForAnalysis(SgGraphNode* n, int loopNum, int pathNum, std::ofstream &ss) {
    int id = n->get_index();
    if (loopNum != 0) {
        ss << id << " [label=\"" << "LoopNumS" << loopNum << "\", color=\"" << "green" << "\", style=\"" << "solid" << "\"];\n";
    }
    else {
        ss << id << " [label=\"" << "pathNumS" << pathNum << "\", color=\"" << "black" << "\", style=\"" << "dotted" << "\"];\n";
    }
}
template<class InheritedAttributeType, class SynthesizedAttributeType>
void
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
printEdgeForAnalysis(SgDirectedGraphEdge* e, bool isNullEdge, std::ofstream &ss) {
    // Emits one DOT edge line; null edges are labeled "NullEdge" and drawn
    // dotted, ordinary edges are unlabeled and drawn solid.
    const char* label = isNullEdge ? "NullEdge" : "";
    const char* style = isNullEdge ? "dotted" : "solid";
    ss << e->get_from()->get_index() << " -> " << e->get_to()->get_index()
       << " [label=\"" << label << "\", style=\"" << style << "\"];\n";
}
template<class InheritedAttributeType, class SynthesizedAttributeType>
void
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
printEdgeForAnalysisPath(SgGraphNode* g1, SgGraphNode* g2, std::ofstream &ss) {
    // DOT edge from g2 to g1 (note the reversed argument order), solid style.
    int fromId = g2->get_index();
    int toId = g1->get_index();
    ss << fromId << " -> " << toId << " [label=\"" << "Edge" << "\", style=\"" << "solid" << "\"];\n";
}
/* END DEBUGGING */
//This function sets up the graph so that the evaluatePath function can easily traverse the paths
template<class InheritedAttributeType, class SynthesizedAttributeType>
void
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
solvePaths(SgIncidenceDirectedGraph* g, SgGraphNode* n, SgGraphNode* endnode) {
    // Decomposes the graph into subpaths between "markers" (nodes with more
    // than one outgoing edge) and records them in pathsAtMk so that
    // evaluatePaths() can later enumerate complete paths cheaply. Along the
    // way it detects loops (revisits of nodes already on the current path)
    // and records them in mkloops/mkloopmap.
    bool done = false;
    bool edges = true;
    bool tookone = false;
    // mkpath: subpath being built since the last marker; marks: stack of
    // markers with possibly-unexplored out-edges; path: full walk so far.
    std::vector<SgGraphNode*> mkpath;
    std::vector<SgGraphNode*> marks;
    marks.push_back(n);
    mkglobal.push_back(n);
    SgGraphNode* currn = n;
    SgGraphNode* took;
    // 'taken' holds the out-edges already explored from each marker.
    std::set<SgDirectedGraphEdge*> taken;
    std::vector<SgGraphNode*> toTake;
    std::vector<SgGraphNode*> path;
    path.push_back(n);
    mkpath.push_back(n);
    int itr = 0;
    int bifurcations = 0;
    // Markers whose out-edges have all been explored.
    std::map<SgGraphNode*, bool> completed;
    while (done == false) {
        ROSE_ASSERT(currn != NULL);
        //check to see if we've hit the endnode or if we're done, if not continue, if so push the subpath into the "pathsAtMk" repository
        if (currn == endnode || completed.find(currn) != completed.end()) {
            if (pathsAtMk.find(marks.back()) == pathsAtMk.end()) {
                std::vector<std::vector<SgGraphNode*> > emptypath;
                pathsAtMk[marks.back()] = emptypath;
            }
            edges = false;
            // Record the finished subpath under its originating marker.
            pathsAtMk[marks.back()].push_back(mkpath);
            //for (int mk = 0; mk < mkpath.size(); mk++) {
            //   std::set<SgDirectedGraphEdge*> iedg = g->computeEdgeSetIn(mkpath[mk]);
            //if (iedg.size() > 1) {
            //    ploops.insert(mkpath[mk]);
            //   }
            //}
            ROSE_ASSERT(mkpath.front() == marks.back());
            if (marks.size() == 0) {
                return;
            }
            mkpath.clear();
            bool y = true;
            bool haventtaken = false;
            bool p = true;
            int place;
            bool found = false;
            // Backtrack: pop markers until one with an untaken out-edge is
            // found (or the marker stack empties, in which case we're done).
            while (found == false) {
                if (marks.size() == 0) {
                    return;
                }
                SgDirectedGraphEdge* tooked;
                SgGraphNode* mark1 = marks.back();
                std::set<SgDirectedGraphEdge*> oedg = g->computeEdgeSetOut(mark1);
                ROSE_ASSERT(oedg.size() > 1 || mark1 == n);
                // Pick the first not-yet-taken out-edge of this marker.
                for (std::set<SgDirectedGraphEdge*>::iterator j = oedg.begin(); j != oedg.end(); j++) {
                    if (taken.find(*j) == taken.end() && haventtaken == false) {
                        tooked = *j;
                        haventtaken = true;
                    }
                }
                if (haventtaken == true) {
                    if (marks.back() == n) {
                        path.clear();
                    }
                    path.push_back(marks.back());
                    if ( mkpath.empty() || (mkpath.back() != marks.back()) ) {
                        ROSE_ASSERT(!marks.empty());
                        mkpath.push_back(marks.back());
                    }
                    taken.insert(tooked);
                    took = tooked->get_to();
                    found = true;
                }
                else {
                    // Marker exhausted: mark complete and climb the stack.
                    completed[marks.back()] = true;
                    bifurcations++;
                    marks.pop_back();
                }
            }
            if (marks.size() == 0) {
                return;
            }
            haventtaken = false;
            found = false;
        }
        //if we haven't reached the endnode or completed, continue down the graph
        else {
            std::set<SgDirectedGraphEdge*> oedg = g->computeEdgeSetOut(currn);
            std::set<SgDirectedGraphEdge*> iedg = g->computeEdgeSetIn(currn);
            if (oedg.size() > 1) {
                // currn is a new marker: close the current subpath here and
                // start a fresh one rooted at currn.
                if (mkpath.back() != currn) {
                    mkpath.push_back(currn);
                }
                pathsAtMk[marks.back()].push_back(mkpath);
                mkpath.clear();
                mkpath.push_back(currn);
                marks.push_back(currn);
                if (find(mkglobal.begin(), mkglobal.end(), currn) == mkglobal.end()) {
                    mkglobal.push_back(currn);
                }
                // Take the first untaken out-edge; alternatives are explored
                // later during backtracking.
                for (std::set<SgDirectedGraphEdge*>::iterator i = oedg.begin(); i != oedg.end(); i++) {
                    if (taken.find(*i) == taken.end() && tookone == false) {
                        taken.insert(*i);
                        tookone = true;
                        took = (*i)->get_to();
                    }
                    else if (taken.find(*i) == taken.end() && tookone == true) {
                        //toTake.push_back((*i)->get_to());
                    }
                }
                tookone = false;
            }
            else {
                // Single out-edge: just follow it.
                took = (*(oedg.begin()))->get_to();
            }
        }
        itr++;
        if (find(path.begin(), path.end(), took) == path.end()) {
            // Unvisited node: extend the current path and subpath.
            mkpath.push_back(took);
            path.push_back(took);
            currn = took;
        }
        else {
            // 'took' is already on the current path: we have found a loop.
            // Record the loop head and extract the loop body from 'path'.
            mkloops.insert(took);
            std::vector<SgGraphNode*> lptemp;
            lptemp.clear();
            lptemp.push_back(took);
            while (path.back() != took) {
                path.pop_back();
                lptemp.push_back(path.back());
            }
            (mkloopmap[took]).insert(lptemp);
            /*
            if (lptemp.size() > 1) {
                if (find(looppaths.begin(), looppaths.end(), lptemp) == looppaths.end() && find(lptemp.begin(), lptemp.end(), st) == lptemp.end() && find(lptemp.begin(), lptemp.end(), endnode) == lptemp.end()) {
                    looppaths.push_back(lptemp);
                    loopnum++;
                    for (unsigned int i = 0; i < lptemp.size(); i++) {
                        loopNumMap[lptemp[i]] = loopnum;
                    }
                }
            }
            */
            path.push_back(took);
            currn = path.back();
            mkpath.push_back(took);
        }
    }
    return;
}
//not currently useful
template <class InheritedAttributeType, class SynthesizedAttributeType>
SynthesizedAttributeType
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
defaultSynthesizedAttribute(InheritedAttributeType inh)
{
    // Return a value-initialized synthesized attribute; the argument is unused.
    return SynthesizedAttributeType();
}
//computes the order in which to evaluate the nodes in nodal analysis so that you don't evaluate a node before you evaluate its parents
template <class InheritedAttributeType, class SynthesizedAttributeType>
void
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
computeOrder(SgIncidenceDirectedGraph* g, SgGraphNode* n, SgGraphNode* endnode) {
    // Walk the graph from n until endnode is reached, assigning every node an
    // evaluation order index once all of its parents are ordered, so that
    // nodal analysis never evaluates a node before its parents.
    // oVals maps node -> order index; iVals is the inverse (index -> node)
    // and is later iterated in ascending key order by computeInheritedOrdered.
    std::map<SgGraphNode*, int> incomputables;
    SgGraphNode* currn = n;
    int orders = 0;
    while (true) {
        // Periodic progress trace for very large graphs.
        if (orders % 10000 == 0) {
            std::cout << "orders: " << orders << std::endl;
        }
        orders++;
        if (computable(g, currn) || currn == n) {
            if (oVals.find(currn) == oVals.end()) {
                // Assign the next order index consistently in both maps.
                // (Previously oVals received currm while iVals was keyed at
                // currm + 1 and currm advanced by three per node; only
                // presence in oVals and the iteration order of iVals matter,
                // both of which are preserved by the single increment.)
                oVals[currn] = currm;
                iVals[currm] = currn;
                ++currm;
            }
            if (currn == endnode) {
                break;
            }
            std::pair<bool, SgGraphNode*> pbs = getNextChild(g, currn);
            computedNodes.insert(currn);
            ROSE_ASSERT(pbs.first == true);
            currn = pbs.second;
        }
        else {
            // Not all parents are ordered yet; retreat to an unfinished parent.
            std::pair<bool, SgGraphNode*> pbp = getNextPar(g, currn);
            ROSE_ASSERT(pbp.first == true);
            currn = pbp.second;
        }
    }
    std::cout << "required orders" << orders << std::endl;
    std::cout << "incomputables.size() " << incomputables.size() << std::endl;
}
//simple function to check the computability under nodal analysis
template <class InheritedAttributeType, class SynthesizedAttributeType>
bool
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
computable(SgIncidenceDirectedGraph* g, SgGraphNode* n) {
    // A node is computable once it has already been computed, or once every
    // incoming edge either originates from an ordered parent or has been
    // marked as a null-ordered edge.
    if (computedNodes.find(n) != computedNodes.end()) {
        return true;
    }
    std::set<SgDirectedGraphEdge*> inEdges = g->computeEdgeSetIn(n);
    for (std::set<SgDirectedGraphEdge*>::iterator it = inEdges.begin(); it != inEdges.end(); ++it) {
        if (oVals.find((*it)->get_from()) == oVals.end() && nullEdgesOrdered.find(*it) == nullEdgesOrdered.end()) {
            return false;
        }
    }
    return true;
}
//computes the inherited attribute values in nodal analysis
template <class InheritedAttributeType, class SynthesizedAttributeType>
void
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
computeInheritedOrdered(SgIncidenceDirectedGraph* g, SgGraphNode* n) {
    // Evaluate inherited attributes in the precomputed order (iVals), so no
    // node is evaluated before its parents.
    int visited = 0;
    for (std::map<int, SgGraphNode*>::iterator it = iVals.begin(); it != iVals.end(); ++it) {
        visited++;
        ROSE_ASSERT(canEval(g, it->second));
        setPathVal(g, n);
        evalNodeOrdered(g, it->second);
    }
}
//checks to see if evaluation is possible under nodal analysis
template <class InheritedAttributeType, class SynthesizedAttributeType>
bool
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
canEval(SgIncidenceDirectedGraph* g, SgGraphNode* n) {
    // Nodes whose inherited attribute is already known are trivially evaluable.
    if (inhVals.find(n) != inhVals.end()) {
        return true;
    }
    // Otherwise every incoming edge must come from an evaluated parent,
    // unless the edge was marked as a null-ordered edge.
    std::set<SgDirectedGraphEdge*> inEdges = g->computeEdgeSetIn(n);
    for (std::set<SgDirectedGraphEdge*>::iterator it = inEdges.begin(); it != inEdges.end(); ++it) {
        if (inhVals.find((*it)->get_from()) == inhVals.end() && nullEdgesOrdered.find(*it) == nullEdgesOrdered.end()) {
            return false;
        }
    }
    return true;
}
//actually does the evaluation
template <class InheritedAttributeType, class SynthesizedAttributeType>
void
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
evalNodeOrdered(SgIncidenceDirectedGraph* g, SgGraphNode* n) {
    // Skip nodes whose inherited attribute has already been computed.
    if (inhVals.find(n) != inhVals.end()) {
        return;
    }
    // Gather the inherited attributes of all parents that have been evaluated.
    std::set<SgDirectedGraphEdge*> parentEdges = g->computeEdgeSetIn(n);
    std::vector<InheritedAttributeType> parentAttrs;
    for (std::set<SgDirectedGraphEdge*>::iterator it = parentEdges.begin(); it != parentEdges.end(); ++it) {
        if (inhVals.find((*it)->get_from()) != inhVals.end()) {
            parentAttrs.push_back(inhVals[(*it)->get_from()]);
        }
    }
    // Evaluate unless this is the start node with no parent attributes.
    if (n != st || parentAttrs.size() > 0) {
        inhVals[n] = evaluateInheritedAttribute(n, parentAttrs);
    }
}
//debugging function, currently not useful for the end user
template <class InheritedAttributeType, class SynthesizedAttributeType>
void
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
setPathVal(SgIncidenceDirectedGraph* g, SgGraphNode* currn) {
    // The path value of a node is the sum of the nonzero path values of its
    // parents; compute it once and memoize it in pathValMap.
    if (pathValMap.find(currn) != pathValMap.end()) {
        return;
    }
    std::set<SgDirectedGraphEdge*> inEdges = g->computeEdgeSetIn(currn);
    int pathSum = 0;
    for (std::set<SgDirectedGraphEdge*>::iterator it = inEdges.begin(); it != inEdges.end(); ++it) {
        // Every parent must already carry a path value at this point.
        ROSE_ASSERT(pathValMap.find((*it)->get_from()) != pathValMap.end());
        int parentVal = pathValMap[(*it)->get_from()];
        if (parentVal != 0) {
            pathSum += parentVal;
        }
    }
    pathValMap[currn] = pathSum;
}
//computes the next child to be analyzed in nodal analysis
template <class InheritedAttributeType, class SynthesizedAttributeType>
std::pair<bool, SgGraphNode*>
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
getNextChild(SgIncidenceDirectedGraph* g, SgGraphNode* n) {
    // Select the next child of n to visit during ordering: with exactly one
    // out-edge take it unconditionally; otherwise take the first child that
    // has not been computed yet, recording the traversed edge.
    // Returns {true, child}; a child is always expected to exist here.
    std::set<SgDirectedGraphEdge*> outs = g->computeEdgeSetOut(n);
    SgGraphNode* nextNode = NULL;  // was uninitialized; NULL guards the failure path
    bool completed = false;
    for (std::set<SgDirectedGraphEdge*>::iterator i = outs.begin(); i != outs.end(); i++) {
        if (outs.size() == 1) {
            nextNode = (*i)->get_to();
            if (nullEdgesOrdered.find(*i) != nullEdgesOrdered.end()) {
                nullNum++;
            }
            completed = true;
        }
        else if (completed == false && computedNodes.find((*i)->get_to()) == computedNodes.end()) {
            completed = true;
            nextNode = (*i)->get_to();
            if (nullEdgesOrdered.find(*i) != nullEdgesOrdered.end()) {
                nullNum++;
            }
            completedEdgesOut.insert(*i);
        }
    }
    // The original tracked a 'completeNull' flag that was never set and could
    // return an uninitialized 'nullNode' when assertions are compiled out;
    // that dead branch has been removed.
    ROSE_ASSERT(completed == true);
    std::pair<bool, SgGraphNode*> pr;
    pr.first = completed;
    pr.second = nextNode;
    return pr;
}
//computes the next parent to be analyzed in nodal analysis
template <class InheritedAttributeType, class SynthesizedAttributeType>
std::pair<bool, SgGraphNode*>
SgGraphTraversal<InheritedAttributeType, SynthesizedAttributeType>::
getNextPar(SgIncidenceDirectedGraph* g, SgGraphNode* n) {
    // Select the next parent of n to retreat to when n is not yet computable:
    // with a single in-edge take that parent; otherwise take the first parent
    // whose edge has not been walked. If every in-edge has been walked but a
    // parent is still uncomputed, stay on n and mark that edge as a null edge.
    std::set<SgDirectedGraphEdge*> ins = g->computeEdgeSetIn(n);
    SgGraphNode* nextPar = NULL;  // was uninitialized; NULL guards the failure path
    bool completed = false;
    bool completeNull = false;
    for (std::set<SgDirectedGraphEdge*>::iterator i = ins.begin(); i != ins.end(); i++) {
        if (ins.size() == 1) {
            completed = true;
            completedEdges.insert(*i);
            nextPar = (*i)->get_from();
        }
        else if (completedEdges.find(*i) == completedEdges.end() && completed == false) {
            completed = true;
            nextPar = (*i)->get_from();
            completedEdges.insert(*i);
        }
        else if (completedEdges.find(*i) != completedEdges.end() && computedNodes.find((*i)->get_from()) == computedNodes.end() && completed == false) {
            // Edge already walked but its source is uncomputed: record it as
            // a null-ordered edge and report n itself as the "parent".
            completeNull = true;
            nextPar = n;
            nullEdgesOrdered.insert(*i);
            nullEdgesPaths++;
        }
    }
    ROSE_ASSERT(completed == true || completeNull == true);
    std::pair<bool, SgGraphNode*> pr;
    pr.first = completed;
    pr.second = nextPar;
    if (completeNull == true && completed == false) {
        pr.first = completeNull;
    }
    return pr;
}
|
DiracMatrix.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2019 QMCPACK developers.
//
// File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_DIRAC_MATRIX_H
#define QMCPLUSPLUS_DIRAC_MATRIX_H
#include "CPU/Blasf.h"
#include "CPU/BlasThreadingEnv.h"
#include "OhmmsPETE/OhmmsMatrix.h"
#include "type_traits/scalar_traits.h"
#include "Message/OpenMP.h"
#include "CPU/SIMD/simd.hpp"
namespace qmcplusplus
{
/** LU factorization of a float matrix (LAPACK sgetrf); the status code is discarded. */
inline void Xgetrf(int n, int m, float* restrict a, int lda, int* restrict piv)
{
  int info;
  sgetrf(n, m, a, lda, piv, info);
}
/** inversion of a float matrix after LU factorization (LAPACK sgetri); status discarded. */
inline void Xgetri(int n, float* restrict a, int lda, int* restrict piv, float* restrict work, int& lwork)
{
  int info;
  sgetri(n, a, lda, piv, work, lwork, info);
}
/** LU factorization of a std::complex<float> matrix (LAPACK cgetrf); status discarded. */
inline void Xgetrf(int n, int m, std::complex<float>* restrict a, int lda, int* restrict piv)
{
  int info;
  cgetrf(n, m, a, lda, piv, info);
}
/** inversion of a std::complex<float> matrix after LU factorization (LAPACK cgetri);
 *  status discarded. */
inline void Xgetri(int n,
                   std::complex<float>* restrict a,
                   int lda,
                   int* restrict piv,
                   std::complex<float>* restrict work,
                   int& lwork)
{
  int info;
  cgetri(n, a, lda, piv, work, lwork, info);
}
/** LU factorization of a double matrix (LAPACK dgetrf); status discarded. */
inline void Xgetrf(int n, int m, double* restrict a, int lda, int* restrict piv)
{
  int info;
  dgetrf(n, m, a, lda, piv, info);
}
/** inversion of a double matrix after LU factorization (LAPACK dgetri); status discarded. */
inline void Xgetri(int n, double* restrict a, int lda, int* restrict piv, double* restrict work, int& lwork)
{
  int info;
  dgetri(n, a, lda, piv, work, lwork, info);
}
/** LU factorization of a std::complex<double> matrix (LAPACK zgetrf); status discarded. */
inline void Xgetrf(int n, int m, std::complex<double>* restrict a, int lda, int* restrict piv)
{
  int info;
  zgetrf(n, m, a, lda, piv, info);
}
/** inversion of a std::complex<double> matrix after LU factorization (LAPACK zgetri);
 *  status discarded. */
inline void Xgetri(int n,
                   std::complex<double>* restrict a,
                   int lda,
                   int* restrict piv,
                   std::complex<double>* restrict work,
                   int& lwork)
{
  int info;
  zgetri(n, a, lda, piv, work, lwork, info);
}
template<typename TIN, typename TOUT>
inline void TansposeSquare(const TIN* restrict in, TOUT* restrict out, size_t n, size_t lda)
{
#pragma omp simd
for (size_t i = 0; i < n; ++i)
for (size_t j = 0; j < n; ++j)
out[i * lda + j] = in[i + j * lda];
}
/** accumulate the log-determinant from the diagonal of an LU factorization
 * @param diag diagonal of the U factor (length n)
 * @param n matrix dimension
 * @param pivot LAPACK pivot indices (1-based); a row swap (pivot[i] != i+1) flips the sign
 * @param logdet output: sum of log of the sign-corrected diagonal entries
 *
 * Uses an int loop index to match n, avoiding the previous signed/unsigned
 * comparison; the non-standard 'restrict' hint is dropped (it is only an
 * optimization hint and not part of the contract).
 */
template<typename T, typename T_FP>
inline void computeLogDet(const T* diag, int n, const int* pivot, std::complex<T_FP>& logdet)
{
  logdet = std::complex<T_FP>();
  for (int i = 0; i < n; i++)
    logdet += std::log(std::complex<T_FP>((pivot[i] == i + 1) ? diag[i] : -diag[i]));
}
/** helper class to compute matrix inversion and the log value of determinant
 * @tparam T_FP the datatype used in the actual computation of matrix inversion
 *
 * Holds the LAPACK workspace (work array, pivots) and a mixed-precision
 * scratch matrix so repeated inversions reuse allocations.
 */
template<typename T_FP>
class DiracMatrix
{
typedef typename scalar_traits<T_FP>::real_type real_type_fp;
// LAPACK work array for Xgetri
aligned_vector<T_FP> m_work;
// LAPACK pivot indices
aligned_vector<int> m_pivot;
// size of m_work; 0 until the first reset() (see constructor)
int Lwork;
/// scratch space used for mixed precision
Matrix<T_FP> psiM_fp;
/// LU diagonal elements
aligned_vector<T_FP> LU_diag;
/// reset internal work space
// Performs an lwork = -1 workspace query against Xgetri, then resizes
// m_work, m_pivot and LU_diag for matrices of leading dimension lda.
inline void reset(T_FP* invMat_ptr, const int lda)
{
m_pivot.resize(lda);
Lwork = -1;
T_FP tmp;
real_type_fp lw;
// lwork = -1: LAPACK workspace query; the optimal size is returned in tmp.
Xgetri(lda, invMat_ptr, lda, m_pivot.data(), &tmp, Lwork);
// convert() is a project utility — presumably extracts the real part; TODO confirm.
convert(tmp, lw);
Lwork = static_cast<int>(lw);
m_work.resize(Lwork);
LU_diag.resize(lda);
}
/** compute the inverse of invMat (in place) and the log value of determinant
 * @tparam TREAL real type
 * @param n invMat is n x n matrix
 * @param lda the first dimension of invMat
 * @param LogDet log determinant value of invMat before inversion
 */
template<typename TREAL>
inline void computeInvertAndLog(T_FP* invMat, const int n, const int lda, std::complex<TREAL>& LogDet)
{
// Scope BLAS threading for this call — project utility.
BlasThreadingEnv knob(getNextLevelNumThreads());
// (Re)allocate workspace when the matrix outgrew it.
if (Lwork < lda)
reset(invMat, lda);
// LU factorize, save the U diagonal for the log-determinant, then invert.
Xgetrf(n, n, invMat, lda, m_pivot.data());
for(int i=0; i<n; i++)
LU_diag[i] = invMat[i*lda+i];
computeLogDet(LU_diag.data(), n, m_pivot.data(), LogDet);
Xgetri(n, invMat, lda, m_pivot.data(), m_work.data(), Lwork);
}
public:
DiracMatrix() : Lwork(0) {}
/** compute the inverse of the transpose of matrix A and its determinant value in log
 * when T_FP and TMAT are the same
 * @tparam TMAT matrix value type
 * @tparam TREAL real type
 */
template<typename TMAT, typename TREAL>
inline std::enable_if_t<std::is_same<T_FP, TMAT>::value>
invert_transpose(const Matrix<TMAT>& amat,
Matrix<TMAT>& invMat,
std::complex<TREAL>& LogDet)
{
const int n = invMat.rows();
const int lda = invMat.cols();
// Same precision: transpose straight into invMat and invert in place.
simd::transpose(amat.data(), n, amat.cols(), invMat.data(), n, lda);
computeInvertAndLog(invMat.data(), n, lda, LogDet);
}
/** compute the inverse of the transpose of matrix A and its determinant value in log
 * when T_FP and TMAT are not the same and need scratch space psiM_fp
 * @tparam TMAT matrix value type
 * @tparam TREAL real type
 */
template<typename TMAT, typename TREAL>
inline std::enable_if_t<!std::is_same<T_FP, TMAT>::value>
invert_transpose(const Matrix<TMAT>& amat,
Matrix<TMAT>& invMat,
std::complex<TREAL>& LogDet)
{
const int n = invMat.rows();
const int lda = invMat.cols();
// Mixed precision: do the work in T_FP scratch, then copy back to TMAT.
psiM_fp.resize(n,lda);
simd::transpose(amat.data(), n, amat.cols(), psiM_fp.data(), n, lda);
computeInvertAndLog(psiM_fp.data(), n, lda, LogDet);
invMat = psiM_fp;
}
};
} // namespace qmcplusplus
#endif // QMCPLUSPLUS_DIRAC_MATRIX_H
|
f3f625.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
#include <stdio.h>
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
/* Opaque data carrier handed in by the Devito runtime: a raw buffer plus its
 * size metadata. Only 'data' and 'size' are dereferenced in this file; the
 * remaining fields (npsize/dsize/hsize/hofs/oofs) presumably describe padded
 * sizes, halo sizes and offsets -- TODO confirm against the Devito code
 * generator. */
struct dataobj
{
void *restrict data;
int *size;
int *npsize;
int *dsize;
int *hsize;
int *hofs;
int *oofs;
};
/* Wall-clock accumulators (seconds) for the instrumented code sections;
 * filled via gettimeofday() in ForwardTTI. */
struct profiler
{
double section0; /* coefficient precomputation */
double section1; /* stencil update + source injection (bf0) */
double section2; /* not updated in this translation unit */
};
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r47_vec, float **restrict r48_vec, const int time, const int tw);
/* Time-tiled TTI forward propagator (Devito-generated).
 * Section 0 precomputes trigonometric/anisotropy coefficient fields from
 * phi/theta/delta; the main loop then sweeps wavefront-skewed space-time
 * tiles, delegating the stencil update and source injection to bf0
 * (section 1). Always returns 0.
 * NOTE(review): posix_memalign return values are unchecked -- on allocation
 * failure the scratch arrays would be used uninitialized. */
int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
/* Typed multi-dimensional views over the opaque dataobj buffers. Several of
 * these (e.g. u, v, save_src_u, source_id) are not referenced in this
 * function body -- kept as emitted by the code generator. */
int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
/* Scratch arrays (one extra cell per dimension) for the precomputed
 * cos/sin/sqrt coefficient fields filled in section0. */
float(*r17)[y_size + 1][z_size + 1];
posix_memalign((void **)&r17, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r18)[y_size + 1][z_size + 1];
posix_memalign((void **)&r18, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r19)[y_size + 1][z_size + 1];
posix_memalign((void **)&r19, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r20)[y_size + 1][z_size + 1];
posix_memalign((void **)&r20, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r21)[y_size + 1][z_size + 1];
posix_memalign((void **)&r21, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
/* Per-thread stencil scratch pointers; the blocks themselves are allocated
 * inside the parallel region below so each thread owns its buffer. */
float **r47;
posix_memalign((void **)&r47, 64, sizeof(float *) * nthreads);
float **r48;
posix_memalign((void **)&r48, 64, sizeof(float *) * nthreads);
/* Tile and block geometry supplied by the caller via block_sizes. */
int y0_blk0_size = block_sizes[3];
int x0_blk0_size = block_sizes[2];
int yb_size = block_sizes[1];
int xb_size = block_sizes[0];
/* sf: skewing factor of the wavefront time tiling -- presumably tied to the
 * stencil radius; TODO confirm against the generator. */
int sf = 2;
int t_blk_size = 2 * sf * (time_M - time_m);
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
posix_memalign((void **)&r47[tid], 64, sizeof(float[x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1]));
posix_memalign((void **)&r48[tid], 64, sizeof(float[x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1]));
}
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
/* Precompute rotation/anisotropy coefficients over the grid extended by one
 * cell at the low end (loops start at *_m - 1). */
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(2) schedule(static, 1)
for (int x = x_m - 1; x <= x_M; x += 1)
{
for (int y = y_m - 1; y <= y_M; y += 1)
{
#pragma omp simd aligned(delta, phi, theta : 64)
for (int z = z_m - 1; z <= z_M; z += 1)
{
r21[x + 1][y + 1][z + 1] = cos(phi[x + 4][y + 4][z + 4]);
r20[x + 1][y + 1][z + 1] = sin(theta[x + 4][y + 4][z + 4]);
r19[x + 1][y + 1][z + 1] = sin(phi[x + 4][y + 4][z + 4]);
r18[x + 1][y + 1][z + 1] = cos(theta[x + 4][y + 4][z + 4]);
r17[x + 1][y + 1][z + 1] = sqrt(2 * delta[x + 4][y + 4][z + 4] + 1);
}
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
/* Wavefront-skewed space-time tiling: spatial tile extents grow with
 * sf * (time_M - time_m) because tiles slide forward as time advances; the
 * de-skewing happens inside bf0 via the 'x - time' / 'y - time' indexing. */
for (int t_blk = time_m; t_blk <= 1 + sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
{
for (int xb = x_m-1 ; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size)
{
//printf(" Change of outer xblock %d \n", xb);
for (int yb = y_m-1 ; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size)
{
/* t0/t1/t2 cycle through the three time buffers of u and v. */
for (int time = t_blk, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3); time <= 2 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
{
int tw = ((time / sf) % (time_M - time_m + 1));
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
bf0(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x0_blk0_size, x_size, y0_blk0_size, y_size, z_size, t0, t1, t2, x_M , x_m, y_M , y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, (float **)r47, (float **)r48, time, tw);
// x_M - (x_M - x_m + 1)%(x0_blk0_size), x_m, y_M - (y_M - y_m + 1)%(y0_blk0_size), y_m,
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
}
}
}
}
/* Release per-thread and shared scratch. */
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
free(r47[tid]);
free(r48[tid]);
}
free(r17);
free(r18);
free(r19);
free(r20);
free(r21);
free(r47);
free(r48);
return 0;
}
/* One space-time block of the TTI stencil update plus source injection.
 * First loop nest fills the per-thread scratch r34/r35 with rotated
 * first-derivative terms of u/v over the block plus a one-cell halo; the
 * second loop nest combines them into the update of u[t2]/v[t2] and then
 * injects sources through the sparse source mask. All spatial indices are
 * shifted by 'time' because the tiles are wavefront-skewed by the caller. */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r47_vec, float **restrict r48_vec, const int time, const int tw)
{
/* Typed views over the opaque buffers (same pattern as ForwardTTI). */
float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
float(*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float(*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data;
/* Precomputed coefficient fields from section0: r17 = sqrt(2*delta+1),
 * r18 = cos(theta), r19 = sin(phi), r20 = sin(theta), r21 = cos(phi). */
float(*restrict r17)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r17_vec;
float(*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r18_vec;
float(*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r19_vec;
float(*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r20_vec;
float(*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r21_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
float **r47 = (float **)r47_vec;
float **r48 = (float **)r48_vec;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
/* r34/r35: this thread's scratch block for the rotated derivatives of u/v. */
float(*restrict r34)[y0_blk0_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 1][z_size + 1]) r47[tid];
float(*restrict r35)[y0_blk0_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 1][z_size + 1]) r48[tid];
#pragma omp for collapse(2) schedule(dynamic, 1)
for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
{
/* Pass 1: fill r34/r35 starting one cell before the block origin so the
 * update pass can read the low-side neighbours. */
for (int x = x0_blk0 - 1, xs = 0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++, xs++)
{
for (int y = y0_blk0 - 1, ys = 0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++, ys++)
{
//printf(" bf0 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", tw, x - time + 4, y - time + 4, xs, ys);
#pragma omp simd aligned(u, v : 32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
float r39 = -u[t0][x - time + 4][y - time + 4][z + 4];
r34[xs][ys][z + 1] = 1.0e-1F * (-(r39 + u[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r39 + u[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r39 + u[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
float r40 = -v[t0][x - time + 4][y - time + 4][z + 4];
r35[xs][ys][z + 1] = 1.0e-1F * (-(r40 + v[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r40 + v[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r40 + v[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
}
}
}
/* Pass 2: combine the scratch derivatives into the time update of u and v,
 * then inject any sources that fall inside this column. */
for (int x = x0_blk0, xs = 0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++, xs++)
{
for (int y = y0_blk0, ys = 0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++, ys++)
{
//printf(" bf1 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", tw, x - time + 4, y - time + 4, xs, ys);
#pragma omp simd aligned(damp, epsilon, u, v, vp : 32)
for (int z = z_m; z <= z_M; z += 1)
{
float r46 = 1.0 / dt;
float r45 = 1.0 / (dt * dt);
float r44 = r18[x - time + 1][y - time + 1][z] * r35[xs + 1][ys + 1][z] - r18[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1] + r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r35[xs + 1][ys][z + 1] - r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1] + r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r35[xs][ys + 1][z + 1] - r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1];
float r43 = 1.0 / (vp[x - time + 4][y - time + 4][z + 4] * vp[x - time + 4][y - time + 4][z + 4]);
float r42 = 1.0e-1F * (-r18[x - time + 1][y - time + 1][z] * r34[xs + 1][ys + 1][z] + r18[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1] - r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r34[xs + 1][ys][z + 1] + r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1] - r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r34[xs][ys + 1][z + 1] + r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1]) - 8.33333315e-4F * (u[t0][x - time + 2][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 2][z + 4] + u[t0][x - time + 4][y - time + 4][z + 2] + u[t0][x - time + 4][y - time + 4][z + 6] + u[t0][x - time + 4][y - time + 6][z + 4] + u[t0][x - time + 6][y - time + 4][z + 4]) + 1.3333333e-2F * (u[t0][x - time + 3][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 3][z + 4] + u[t0][x - time + 4][y - time + 4][z + 3] + u[t0][x - time + 4][y - time + 4][z + 5] + u[t0][x - time + 4][y - time + 5][z + 4] + u[t0][x - time + 5][y - time + 4][z + 4]) - 7.49999983e-2F * u[t0][x - time + 4][y - time + 4][z + 4];
float r41 = 1.0 / (r43 * r45 + r46 * damp[x - time + 1][y - time + 1][z + 1]);
float r32 = r45 * (-2.0F * u[t0][x - time + 4][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 4][z + 4]);
float r33 = r45 * (-2.0F * v[t0][x - time + 4][y - time + 4][z + 4] + v[t1][x - time + 4][y - time + 4][z + 4]);
u[t2][x - time + 4][y - time + 4][z + 4] = r41 * ((-r32) * r43 + r42 * (2 * epsilon[x - time + 4][y - time + 4][z + 4] + 1) + 1.0e-1F * r44 * r17[x - time + 1][y - time + 1][z + 1] + r46 * (damp[x - time + 1][y - time + 1][z + 1] * u[t0][x - time + 4][y - time + 4][z + 4]));
v[t2][x - time + 4][y - time + 4][z + 4] = r41 * ((-r33) * r43 + r42 * r17[x - time + 1][y - time + 1][z + 1] + 1.0e-1F * r44 + r46 * (damp[x - time + 1][y - time + 1][z + 1] * v[t0][x - time + 4][y - time + 4][z + 4]));
}
/* Sparse source injection: nnz_sp_source_mask counts the nonzero z-entries
 * for this (x, y) column; sp_source_mask lists their z indices. */
int sp_zi_M = nnz_sp_source_mask[x-time][y-time] - 1;
for (int sp_zi = sp_zi_m; sp_zi <= sp_zi_M; sp_zi += 1)
{
int zind = sp_source_mask[x-time][y-time][sp_zi];
float r22 = save_src_u[tw][source_id[x-time][y-time][zind]] * source_mask[x-time][y-time][zind];
u[t2][x -time + 4][y -time + 4][zind + 4] += r22;
float r23 = save_src_v[tw][source_id[x-time][y-time][zind]] * source_mask[x-time][y-time][zind];
v[t2][x-time + 4][y-time + 4][zind + 4] += r23;
//printf("Source injection at time %d , at : x: %d, y: %d, %d, %f, %f \n", tw, x - time + 4, y - time + 4, zind + 4, r22, r23);
}
}
}
}
}
}
}
|
omp_smithW.c | /*********************************************************************************
* Smith–Waterman algorithm
* Purpose: Local alignment of nucleotide or protein sequences
* Authors: Daniel Holanda, Hanoch Griner, Taynara Pinheiro
* Compilation: gcc omp_smithW.c -o omp_smithW -fopenmp -DDEBUG
* Execution: ./omp_smithW <number_of_threads> <number_of_col> <number_of_rows>
*********************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <time.h>
/*--------------------------------------------------------------------
* Text Tweaks
*/
#define RESET "\033[0m"
#define BOLDRED "\033[1m\033[31m" /* Bold Red */
/* End of text tweaks */
/*--------------------------------------------------------------------
* Constants
*/
#define PATH -1
#define NONE 0
#define UP 1
#define LEFT 2
#define DIAGONAL 3
/* End of constants */
/*--------------------------------------------------------------------
* Helpers
*/
#define min(x, y) (((x) < (y)) ? (x) : (y))
#define max(a,b) (((a) > (b)) ? (a) : (b)) /* result operands parenthesized too */
// #define DEBUG
/* End of Helpers */
/*--------------------------------------------------------------------
* Functions Prototypes
*/
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos);
int matchMissmatchScore(long long int i, long long int j);
void backtrack(int* P, long long int maxPos);
void printMatrix(int* matrix);
void printPredecessorMatrix(int* matrix);
void generate(void);
long long int nElement(long long int i);
void calcFirstDiagElement(long long int *i, long long int *si, long long int *sj);
/* End of prototypes */
/*--------------------------------------------------------------------
* Global Variables
*/
//Defines size of strings to be compared
long long int m ; //Columns - Size of string a
long long int n ; //Lines - Size of string b
//Defines scores
int matchScore = 5;
int missmatchScore = -3;
int gapScore = -4;
//Strings over the Alphabet Sigma
char *a, *b;
/* End of global variables */
/*--------------------------------------------------------------------
* Function: main
*/
int main(int argc, char* argv[]) {
    // Usage: ./omp_smithW <number_of_threads> <number_of_cols> <number_of_rows>
    if (argc < 4) {
        fprintf(stderr, "Usage: %s <threads> <cols> <rows>\n", argv[0]);
        return 1;
    }
    int thread_count = strtol(argv[1], NULL, 10);
    m = strtoll(argv[2], NULL, 10);
    n = strtoll(argv[3], NULL, 10);

#ifdef DEBUG
    printf("\nMatrix[%lld][%lld]\n", n, m);
#endif

    //Because now we have zeros: row 0 and column 0 of H/P form the border
    m++;
    n++;

    //Allocates a and b. generate() fills m and n characters (the
    //post-increment sizes), so the buffers must be sized AFTER the
    //increment; the original allocated first and overflowed each by 1 byte.
    a = malloc(m * sizeof(char));
    b = malloc(n * sizeof(char));

    //Allocates similarity matrix H and predecessor matrix P
    int *H = calloc(m * n, sizeof(int));
    int *P = calloc(m * n, sizeof(int));
    if (a == NULL || b == NULL || H == NULL || P == NULL) {
        fprintf(stderr, "allocation failure\n");
        return 1;
    }

    //Gen rand arrays a and b
    generate();
    //To test the sequence from
    //http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1 (m=11, n=7),
    //overwrite a with "CGTGAATTCAT" and b with "GACTTAC" here.

    //Start position for backtrack
    long long int maxPos = 0;
    //Calculates the similarity matrix
    long long int i, j;

    //Gets Initial time
    double initialTime = omp_get_wtime();

    long long int si, sj, ai, aj;
    //Because now we have zeros ((m-1) + (n-1) - 1)
    long long int nDiag = m + n - 3;
    long long int nEle;

    //Wavefront parallelization: cells on the same anti-diagonal are
    //independent, so each diagonal's cells are shared out with `omp for`.
    //j MUST be private: it was missing from the original clause list, which
    //is ill-formed under default(none) and a data race otherwise.
    #pragma omp parallel num_threads(thread_count) \
        default(none) shared(H, P, maxPos, nDiag) private(nEle, i, j, si, sj, ai, aj)
    {
        for (i = 1; i <= nDiag; ++i)
        {
            nEle = nElement(i);
            calcFirstDiagElement(&i, &si, &sj);
            #pragma omp for
            for (j = 1; j <= nEle; ++j)
            {
                ai = si - j + 1;
                aj = sj + j - 1;
                similarityScore(ai, aj, H, P, &maxPos);
            }
        }
    }

    backtrack(P, maxPos);

    //Gets final time (includes backtrack, as in the original timing)
    double finalTime = omp_get_wtime();
    printf("\nElapsed time: %f\n\n", finalTime - initialTime);

#ifdef DEBUG
    printf("\nSimilarity Matrix:\n");
    printMatrix(H);
    printf("\nPredecessor Matrix:\n");
    printPredecessorMatrix(P);
#endif

    //Frees similarity matrixes
    free(H);
    free(P);
    //Frees input arrays
    free(a);
    free(b);
    return 0;
} /* End of main */
/*--------------------------------------------------------------------
* Function: nElement
* Purpose: Calculate the number of i-diagonal elements
*/
long long int nElement(long long int i) {
    // Number of cells on anti-diagonal i of the (n x m) score matrix.
    if (i < m && i < n) {
        //Number of elements in the diagonal is increasing
        return i;
    }
    else if (i < max(m, n)) {
        //Number of elements in the diagonal is stable
        return min(m, n) - 1;
    }
    else {
        //Number of elements in the diagonal is decreasing.
        //Keep everything in long long: the original stored min(m,n) in a
        //`long int` (truncates on LLP64) and used abs() on a long long
        //difference (truncates to int).
        long long int mn = min(m, n);
        return 2 * mn - i + llabs(m - n) - 2;
    }
}
/*--------------------------------------------------------------------
* Function: calcElement
* Purpose: Calculate the position of (si, sj)-element
*/
void calcFirstDiagElement(long long int *i, long long int *si, long long int *sj) {
    /* Map anti-diagonal index *i to the coordinates (*si, *sj) of its
     * first cell in the score matrix. */
    long long int d = *i;
    if (d < n) {
        /* Diagonal starts on the first column. */
        *si = d;
        *sj = 1;
    }
    else {
        /* Diagonal starts on the last row. */
        *si = n - 1;
        *sj = d - n + 2;
    }
}
/*--------------------------------------------------------------------
* Function: SimilarityScore
* Purpose: Calculate the maximum Similarity-Score H(i,j)
*/
/*
 * Computes H(i,j) and its predecessor direction, and updates the global
 * best-score position *maxPos (shared across OpenMP threads).
 */
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos) {
    int up, left, diag;
    //Stores index of element
    long long int index = m * i + j;
    //Get element above
    up = H[index - m] + gapScore;
    //Get element on the left
    left = H[index - 1] + gapScore;
    //Get element on the diagonal
    diag = H[index - m - 1] + matchMissmatchScore(i, j);
    //Calculates the maximum
    int max = NONE;
    int pred = NONE;
    /* === Matrix ===
     *      a[0] ... a[n]
     * b[0]
     * ...
     * b[n]
     *
     * generate 'a' from 'b', if '←' insert e '↑' remove
     * a=GAATTCA
     * b=GACTT-A
     *
     * generate 'b' from 'a', if '←' insert e '↑' remove
     * b=GACTT-A
     * a=GAATTCA
     */
    if (diag > max) { //same letter ↖
        max = diag;
        pred = DIAGONAL;
    }
    if (up > max) { //remove letter ↑
        max = up;
        pred = UP;
    }
    if (left > max) { //insert letter ←
        max = left;
        pred = LEFT;
    }
    //Inserts the value in the similarity and predecessor matrixes
    H[index] = max;
    P[index] = pred;
    //Updates maximum score to be used as seed on backtrack.
    //Double-checked update: the cheap unsynchronized pre-test skips the
    //critical section in the common case. H[*maxPos] only ever grows, so a
    //stale read can at worst cause a harmless redundant entry into the
    //critical section, where the comparison is repeated safely.
    if (max > H[*maxPos]) {
        #pragma omp critical
        {
            if (max > H[*maxPos]) {
                *maxPos = index;
            }
        }
    }
} /* End of similarityScore */
/*--------------------------------------------------------------------
* Function: matchMissmatchScore
* Purpose: Similarity function on the alphabet for match/missmatch
*/
int matchMissmatchScore(long long int i, long long int j) {
    /* Substitution score for aligning a[j-1] with b[i-1]
     * (row 0 / column 0 of the matrix are the zero border). */
    return (a[j - 1] == b[i - 1]) ? matchScore : missmatchScore;
} /* End of matchMissmatchScore */
/*--------------------------------------------------------------------
* Function: backtrack
* Purpose: Modify matrix to print, path change from value to PATH
*/
/*
 * Walks predecessors from maxPos back to the first cell marked NONE,
 * flipping each visited direction to its negative (via PATH) so the
 * printers can highlight the alignment path.
 *
 * Fix: the original do-while read `predPos` uninitialized (UB) when the
 * starting cell was already NONE; a while loop with an explicit default
 * avoids that.
 */
void backtrack(int* P, long long int maxPos) {
    while (P[maxPos] != NONE) {
        long long int predPos;
        if (P[maxPos] == DIAGONAL)
            predPos = maxPos - m - 1;
        else if (P[maxPos] == UP)
            predPos = maxPos - m;
        else
            predPos = maxPos - 1; // LEFT
        P[maxPos] *= PATH;
        maxPos = predPos;
    }
} /* End of backtrack */
/*--------------------------------------------------------------------
* Function: printMatrix
* Purpose: Print Matrix
*/
void printMatrix(int* matrix) {
    /* Prints the similarity matrix with sequence a as the column header
     * and sequence b down the left edge (row/col 0 are the zero border). */
    long long int row, col;
    printf("-\t-\t");
    for (col = 0; col < m - 1; col++)
        printf("%c\t", a[col]);
    printf("\n-\t");
    for (row = 0; row < n; row++) { //Lines
        for (col = 0; col < m; col++) {
            if (col == 0 && row > 0)
                printf("%c\t", b[row - 1]);
            printf("%d\t", matrix[m * row + col]);
        }
        printf("\n");
    }
} /* End of printMatrix */
/*--------------------------------------------------------------------
* Function: printPredecessorMatrix
* Purpose: Print predecessor matrix
*/
void printPredecessorMatrix(int* matrix) {
    /* Prints the predecessor matrix as arrows; cells on the backtracked
     * path were negated by backtrack() and are shown in bold red. */
    long long int row, col, idx;
    printf(" ");
    for (col = 0; col < m - 1; col++)
        printf("%c ", a[col]);
    printf("\n ");
    for (row = 0; row < n; row++) { //Lines
        for (col = 0; col < m; col++) {
            if (col == 0 && row > 0)
                printf("%c ", b[row - 1]);
            idx = m * row + col;
            int v = matrix[idx];
            int onPath = (v < 0);
            if (onPath) {
                printf(BOLDRED);
                v = -v; /* compare against the positive direction codes */
            }
            if (v == UP)
                printf("↑ ");
            else if (v == LEFT)
                printf("← ");
            else if (v == DIAGONAL)
                printf("↖ ");
            else
                printf("- ");
            if (onPath)
                printf(RESET);
        }
        printf("\n");
    }
} /* End of printPredecessorMatrix */
/*--------------------------------------------------------------------
* Function: generate
* Purpose: Generate arrays a and b
*/
void generate() {
    /* Seed the PRNG and fill both sequences with random nucleotides.
     * The table reproduces the original mapping exactly:
     * 0 -> 'A', 1 -> 'T' (the original's else-branch), 2 -> 'C', 3 -> 'G'. */
    const char nucleotide[4] = {'A', 'T', 'C', 'G'};
    long long int k;
    //Random seed
    srand(time(NULL));
    //Generates the values of a
    for (k = 0; k < m; k++)
        a[k] = nucleotide[rand() % 4];
    //Generates the values of b
    for (k = 0; k < n; k++)
        b[k] = nucleotide[rand() % 4];
} /* End of generate */
/*--------------------------------------------------------------------
* External References:
* http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1
* http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm
* http://baba.sourceforge.net/
*/
|
2182.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
		DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
		DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
		DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
		DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
  /* Fill the four input matrices with the reference 3mm formulas. */
  int r, c;

  for (r = 0; r < ni; r++)
    for (c = 0; c < nk; c++)
      A[r][c] = ((DATA_TYPE) r*c) / ni;

  for (r = 0; r < nk; r++)
    for (c = 0; c < nj; c++)
      B[r][c] = ((DATA_TYPE) r*(c+1)) / nj;

  for (r = 0; r < nj; r++)
    for (c = 0; c < nm; c++)
      C[r][c] = ((DATA_TYPE) r*(c+3)) / nl;

  for (r = 0; r < nm; r++)
    for (c = 0; c < nl; c++)
      D[r][c] = ((DATA_TYPE) r*(c+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nl,
		 DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  /* Dump the live-out matrix G to stderr so dead-code elimination cannot
     remove the kernel; a newline is emitted every 20 printed values. */
  int r, c;

  for (r = 0; r < ni; r++)
    for (c = 0; c < nl; c++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, G[r][c]);
      if ((r * ni + c) % 20 == 0)
        fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: G := (A*B) * (C*D).
   The whole function is timed, including call and return.

   Fix: the inner-product index k is declared at function scope; inside a
   `parallel for simd` region it would otherwise be SHARED across threads,
   a data race that corrupts results. `private(k)` gives each thread its
   own copy. The loop variables i and j are privatized automatically by
   their respective constructs. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
		DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
		DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
		DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
		DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
		DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
		DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
		DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int i, j, k;
#pragma scop
  {
    /* E := A*B */
#pragma omp target teams distribute
    for (i = 0; i < _PB_NI; i++)
      {
#pragma omp parallel for simd num_threads(8) private(k)
	for (j = 0; j < _PB_NJ; j++)
	  {
	    E[i][j] = 0;
	    for (k = 0; k < _PB_NK; ++k)
	      E[i][j] += A[i][k] * B[k][j];
	  }
      }
    /* F := C*D */
#pragma omp target teams distribute
    for (i = 0; i < _PB_NJ; i++)
      {
#pragma omp parallel for simd num_threads(8) private(k)
	for (j = 0; j < _PB_NL; j++)
	  {
	    F[i][j] = 0;
	    for (k = 0; k < _PB_NM; ++k)
	      F[i][j] += C[i][k] * D[k][j];
	  }
      }
    /* G := E*F */
#pragma omp target teams distribute
    for (i = 0; i < _PB_NI; i++)
      {
#pragma omp parallel for simd num_threads(8) private(k)
	for (j = 0; j < _PB_NL; j++)
	  {
	    G[i][j] = 0;
	    for (k = 0; k < _PB_NJ; ++k)
	      G[i][j] += E[i][k] * F[k][j];
	  }
      }
  }
#pragma endscop
}
int main(int argc, char** argv)
{
  /* Retrieve problem size (compile-time constants from 3mm.h;
     argc/argv are unused). */
  int ni = NI;
  int nj = NJ;
  int nk = NK;
  int nl = NL;
  int nm = NM;
  /* Variable declaration/allocation (PolyBench-managed heap arrays). */
  POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
  POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
  POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
  POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
  POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
  /* Initialize array(s). E, F, G are outputs and need no initialization. */
  init_array (ni, nj, nk, nl, nm,
	      POLYBENCH_ARRAY(A),
	      POLYBENCH_ARRAY(B),
	      POLYBENCH_ARRAY(C),
	      POLYBENCH_ARRAY(D));
  /* Start timer. */
  polybench_start_instruments;
  /* Run kernel. */
  kernel_3mm (ni, nj, nk, nl, nm,
	      POLYBENCH_ARRAY(E),
	      POLYBENCH_ARRAY(A),
	      POLYBENCH_ARRAY(B),
	      POLYBENCH_ARRAY(F),
	      POLYBENCH_ARRAY(C),
	      POLYBENCH_ARRAY(D),
	      POLYBENCH_ARRAY(G));
  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(E);
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  POLYBENCH_FREE_ARRAY(F);
  POLYBENCH_FREE_ARRAY(C);
  POLYBENCH_FREE_ARRAY(D);
  POLYBENCH_FREE_ARRAY(G);
  return 0;
}
|
omptest-ori.c | #include <nautilus/nautilus.h>
#include <nautilus/shell.h>
#include <nautilus/libccompat.h>
#include <nautilus/random.h>
//#include <nautilus/scheduler.h>
#ifndef NAUT_CONFIG_DEBUG_GPUDEV
#undef DEBUG_PRINT
#define DEBUG_PRINT(fmt, args...)
#endif
#define ERROR(fmt, args...) ERROR_PRINT("omptest: " fmt, ##args)
#define DEBUG(fmt, args...) DEBUG_PRINT("omptest: " fmt, ##args)
#define INFO(fmt, args...) INFO_PRINT("omptest: " fmt, ##args)
static inline uint16_t random()
{
    /* 16-bit random value drawn from the Nautilus kernel entropy source. */
    uint16_t val;
    nk_get_rand_bytes((uint8_t *)&val, sizeof(val));
    return val;
}
#define MAXN 5100 /* Max value of N */
int N; /* Matrix size */
int procs; /* Number of processors to use */
/* Matrices and vectors */
volatile float A[MAXN][MAXN], B[MAXN], X[MAXN];
volatile float ORA[MAXN][MAXN], ORB[MAXN], ORX[MAXN];
/* A * X = B, solve for X */
int seed;
/* Prototype */
void gauss(); /* The function you will provide.
* It is this routine that is timed.
* It is called only on the parent.
*/
/* Initialize A and B (and X to 0.0s) */
/* Fill the pristine copies ORA/ORB with random values; reset_inputs()
 * later copies them into the working A/B/X. */
void initialize_inputs() {
  int r, c;

  printf("\nInitializing...\n");
  /* NOTE(review): entries are written as ORA[c][r] — transposed relative to
   * the ORA[row][col] reads in reset_inputs(). Harmless for random data
   * since the full N x N matrix is covered either way, but worth confirming
   * the intent. */
  for (c = 0; c < N; c++) {
    for (r = 0; r < N; r++) {
      ORA[c][r] = (float) random()/32768.0;
    }
    ORB[c] = (float)random()/32768.0;
  }
}
/* Restore A and B from the saved originals and clear the solution vector,
 * so the OpenMP and serial solvers each start from identical inputs. */
void reset_inputs(){
  int r, c;

  printf("\n reseting...\n");
  for (c = 0; c < N; c++) {
    for (r = 0; r < N; r++) {
      A[r][c] = ORA[r][c];
    }
    B[c] = ORB[c];
    X[c] = 0.0;
  }
}
/* Print input matrices */
void print_inputs() {
int row, col;
if (N < 1000) {
printf("\nA =\n\t");
for (row = 0; row < N; row++) {
for (col = 0; col < N; col++) {
printf("%5.2f%s", A[row][col], (col < N-1) ? ", " : ";\n\t");
}
}
printf("\nB = [");
for (col = 0; col < N; col++) {
printf("%5.2f%s", B[col], (col < N-1) ? "; " : "]\n");
}
}
}
/* Reference serial solver: Gaussian elimination followed by back
 * substitution. Loop order is identical to the OpenMP variant so the
 * floating-point results are comparable. */
void serialgauss(){
  int norm, row, col;   /* pivot row, and the row/col being updated */
  float factor;

  printf("Computing serially.\n");

  /* Forward elimination: for each pivot row `norm`, subtract a multiple of
   * it from every row below so that column `norm` becomes zero there. */
  for (norm = 0; norm < N - 1; norm++) {
    for (row = norm + 1; row < N; row++) {
      factor = A[row][norm] / A[norm][norm];
      for (col = norm; col < N; col++) {
        A[row][col] -= A[norm][col] * factor;
      }
      B[row] -= B[norm] * factor;
    }
  }
  /* (Diagonal elements are not normalized to 1; back substitution
   * accounts for that by dividing at the end.) */
  for (row = N - 1; row >= 0; row--) {
    X[row] = B[row];
    for (col = N-1; col > row; col--) {
      X[row] -= A[row][col] * X[col];
    }
    X[row] /= A[row][row];
  }
}
void ompgauss() {
int norm, row, col; /* Normalization row, and zeroing
* element row and col */
float multiplier;
//doneflag[0] = 1;
printf("Computing using omp.\n");
/* Gaussian elimination.
 * A single parallel region spans the whole elimination: every thread runs
 * the (private) norm loop redundantly, while the work-shared `omp for`
 * splits the row updates of each step. Correctness relies on the implicit
 * barrier at the end of each `omp for`: step norm+1 cannot begin on any
 * thread before all rows of step norm are updated, and within a step the
 * pivot row A[norm][*] is read-only (only rows > norm are written). */
#pragma omp parallel private(row, col, multiplier, norm) num_threads(procs)
{
for (norm = 0; norm < N - 1; norm++) {
#pragma omp for schedule(static,1)
for (row = norm + 1; row < N; row++) {
multiplier = A[row][norm]/A[norm][norm];
for (col = norm; col < N; col++) {
A[row][col] -= A[norm][col] * multiplier;
}
B[row] -= B[norm] * multiplier;
}
}
}
nk_vc_printf("I am done\n");
/* (Diagonal elements are not normalized to 1. This is treated in back
* substitution.)
*/
/* Back substitution — serial: X[row] depends on all X[col] with col > row. */
for (row = N - 1; row >= 0; row--) {
X[row] = B[row];
for (col = N-1; col > row; col--) {
X[row] -= A[row][col] * X[col];
}
X[row] /= A[row][row];
}
}
#define TIME() (double)nk_sched_get_realtime();
/* Shell command handler: "omptest <seed> <size> <nprocs>".
 * Runs the OpenMP and serial Gaussian solvers on the same random system
 * and reports the result difference and speedup. Returns 0 on success,
 * -1 on a malformed command line. */
static int handle_omptest (char * buf, void * priv)
{
    int seed, size, np;   /* note: local `seed` shadows the file-scope one */
    if ((sscanf(buf,"omptest %d %d %d",&seed,&size,&np)!=3)) {
        nk_vc_printf("Don't understand %s please input seed, matrix size and nprocs\n",buf);
        return -1;
    }
    nk_rand_seed(seed);
    N = size;
    procs = np;
    nk_vc_printf("seed %d, size, %d, nprocs: %d\n", seed, N, procs);
    initialize_inputs();
    reset_inputs();
    // print_inputs();

    // Toggle MXCSR bit 9 (ZM, the divide-by-zero exception mask).
    // stmxcsr *stores* the MXCSR register into memory; ldmxcsr *loads*
    // memory into the register. The original sequence had the two swapped:
    // it loaded an UNINITIALIZED variable into MXCSR and then overwrote the
    // toggled value before it was ever applied.
    unsigned mxcsr;
    __asm__ volatile("stmxcsr %0" : "=m"(mxcsr) : : "memory"); // read current
    printf("ld %04x \n", mxcsr);
    mxcsr = mxcsr ^ 0x0200;                                    // flip ZM
    printf("st %08x \n", mxcsr);
    __asm__ volatile("ldmxcsr %0" : : "m"(mxcsr) : "memory");  // apply
    __asm__ volatile("stmxcsr %0" : "=m"(mxcsr) : : "memory"); // read back
    printf("ld %08x \n", mxcsr);

    double start = TIME();
    ompgauss();
    double end = TIME();
    double omp = end-start;
    nk_vc_printf("openmp done %lf\n", omp);

    // Save the OpenMP solution before reset_inputs() clears X.
    float OMP[N];
    for(int row =0; row<N; row++){
        OMP[row] = X[row];
    }
    reset_inputs();
    start = TIME();
    serialgauss();
    end = TIME();
    double serial = end-start;
    nk_vc_printf("serial done %lf\n", serial);

    // Sum of signed element-wise differences. NOTE(review): positive and
    // negative errors can cancel; a max-abs norm would be stricter.
    float difference = 0.0;
    for(int row =0; row<N; row++){
        difference += (OMP[row]- X[row]);
    }
    nk_vc_printf("OMP difference %f speed up %f !\n", difference, serial/omp);
    return 0;
}
/* Register the "omptest" command with the Nautilus shell. */
static struct shell_cmd_impl omptest_impl = {
.cmd = "omptest",
.help_str = "openmp test",
.handler = handle_omptest,
};
nk_register_shell_cmd(omptest_impl);
|
GB_unop__identity_uint8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_int8)
// op(A') function: GB (_unop_tran__identity_uint8_int8)
// C type: uint8_t
// A type: int8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = (uint8_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint8_int8)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // sparse/hyper/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // Cx [p] = identity (cast (Ax [p]))
            Cx [p] = (uint8_t) Ax [p] ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                // entry present: apply the op through the typecast
                Cx [p] = (uint8_t) Ax [p] ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint8_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (cast (A')): the transpose loop lives in the shared template,
    // specialized by the GB_* macros defined at the top of this file.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__plus_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fc64)
// A*D function (colscale): GB (_AxD__plus_fc64)
// D*A function (rowscale): GB (_DxB__plus_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fc64)
// C=scalar+B GB (_bind1st__plus_fc64)
// C=scalar+B' GB (_bind1st_tran__plus_fc64)
// C=A+scalar GB (_bind2nd__plus_fc64)
// C=A'+scalar GB (_bind2nd_tran__plus_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = GB_FC64_add (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_add (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_FC64 || GxB_NO_PLUS_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B, all three matrices dense; specialized for PLUS on FC64.
void GB (_Cdense_ewise3_accum__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // all work is done by the shared dense template, driven by the
    // GB_* type/op macros defined at the top of this file
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B, all three matrices dense (no accumulation into prior C).
void GB (_Cdense_ewise3_noaccum__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // shared dense template, specialized by the macros above
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    // C += B: accumulate sparse B into dense C (subassign method 23)
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__plus_fc64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    // C += b: accumulate a scalar into a dense matrix (subassign method 22)
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // single exit point: the original's duplicate return inside the braces
    // above made this statement unreachable dead code
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // C = A*D: scale each column j of A by the diagonal entry D(j,j)
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    // C = D*B: scale each row i of B by the diagonal entry D(i,i)
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__plus_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseAdd / eWiseUnion: C = A+B, C<M> = A+B, or C<!M> = A+B
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slicing workspaces, released by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    GxB_FC64_t alpha_scalar ;
    GxB_FC64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes alpha/beta for entries missing from A/B
        alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__plus_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult method 08: C = A.*B (masked variants included),
    // where C is sparse or hypersparse
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // eWiseMult method 02: A is sparse/hyper, B is bitmap/full.
    // GB_BINOP_FLIP is 0 for PLUS (commutative), so only the second
    // branch below is compiled in.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    // eWiseMult method 04: C<M> = A.*B with M sparse/hyper,
    // A and B bitmap/full
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__plus_fc64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult where the result C is held as a bitmap matrix
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the PLUS operator with the scalar x bound as the
// first argument, over every entry present in B (bitmap Bb, if non-NULL).
GrB_Info GB (_bind1st__plus_fc64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    GxB_FC64_t *Cz = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Bz = (GxB_FC64_t *) Bx_input ;
    GxB_FC64_t xscalar = (*((GxB_FC64_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only entries present in the bitmap of B are computed
        if (GBB (Bb, p))
        {
            GxB_FC64_t bval = GBX (Bz, p, false) ;
            Cz [p] = GB_FC64_add (xscalar, bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the PLUS operator with the scalar y bound as the
// second argument, over every entry present in A (bitmap Ab, if non-NULL).
GrB_Info GB (_bind2nd__plus_fc64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    GxB_FC64_t *Cz = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Az = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t yscalar = (*((GxB_FC64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap of A are computed
        if (GBB (Ab, p))
        {
            GxB_FC64_t aval = GBX (Az, p, false) ;
            Cz [p] = GB_FC64_add (aval, yscalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_add (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the PLUS operator with the scalar x
// bound as the first argument.  The transpose machinery comes from
// GB_unop_transpose.c, which invokes the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__plus_fc64)
(
    GrB_Matrix C,
    const GB_void *x_input,             // the bound scalar x
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-task workspaces for the transpose
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    GxB_FC64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent generated code (generator artifact:
    // redefined to the same type)
    #undef GB_ATYPE
    #define GB_ATYPE \
    GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_add (aij, y) ; \
}
// C = op (A', y): transpose A and apply the PLUS operator with the scalar y
// bound as the second argument.  The transpose machinery comes from
// GB_unop_transpose.c, which invokes the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,             // the bound scalar y
    int64_t *restrict *Workspaces,      // per-task workspaces for the transpose
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ptr_and_obj_motion.c | // RUN: %libomptarget-compile-run-and-check-generic
// amdgcn does not have printf definition
// XFAIL: amdgcn-amd-amdhsa
#include <stdio.h>
// NOTE(review): presumably models a Fortran-style descriptor ("dope vector")
// whose first member is the data pointer; the dummy fields only pad the
// struct so the pointer is not the whole object -- confirm intent.
typedef struct {
  double *dataptr; // points at the user array mapped to the device
  int dummy1;
  int dummy2;
} DV;
// Reads and overwrites vertexx[77] inside a target region.  main() has
// already issued "target enter data" for vertexx, so this map clause attaches
// to the existing device copy instead of allocating a new one.
void init(double vertexx[]) {
#pragma omp target map(vertexx[0:100])
  {
    printf("In init: %lf, expected 100.0\n", vertexx[77]);
    vertexx[77] = 77.0;
  }
}
// Same array, reached through the pointer member of a DV object: the map
// clause maps dvptr->dataptr[0:100], which refers to the device-resident
// vertexx, and the region increments element 77 (77.0 -> 78.0).
void change(DV *dvptr) {
#pragma omp target map(dvptr->dataptr[0:100])
  {
    printf("In change: %lf, expected 77.0\n", dvptr->dataptr[77]);
    dvptr->dataptr[77] += 1.0;
  }
}
// Driver: keeps vertexx on the device across two target regions (one reached
// directly, one through a struct's pointer member) and checks the final value
// copied back by "target exit data".
int main() {
  double vertexx[100];
  vertexx[77] = 100.0;
  DV dv;
  dv.dataptr = &vertexx[0];
// device copy created here and kept alive until the matching exit data
#pragma omp target enter data map(to:vertexx[0:100])
  init(vertexx);  // sets device vertexx[77] = 77.0
  change(&dv);    // increments it to 78.0 via the DV pointer
#pragma omp target exit data map(from:vertexx[0:100])
  // CHECK: Final: 78.0
  printf("Final: %lf\n", vertexx[77]);
}
|
kmp_stats.h | #ifndef KMP_STATS_H
#define KMP_STATS_H
/** @file kmp_stats.h
* Functions for collecting statistics.
*/
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
#include "kmp_config.h"
#include "kmp_debug.h"
#if KMP_STATS_ENABLED
/* Statistics accumulator.
Accumulates number of samples and computes min, max, mean, standard deviation
on the fly.
Online variance calculation algorithm from
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#On-line_algorithm
*/
#include "kmp_stats_timing.h"
#include <limits>
#include <math.h>
#include <new> // placement new
#include <stdint.h>
#include <string>
#include <vector>
/* Enable developer statistics here if you want them. They are more detailed
than is useful for application characterisation and are intended for the
runtime library developer. */
#define KMP_DEVELOPER_STATS 0
/* Enable/Disable histogram output */
#define KMP_STATS_HIST 0
/*!
* @ingroup STATS_GATHERING
* \brief flags to describe the statistic (timer or counter)
*
*/
// Bit flags describing a statistic; values are OR'd together in the
// statInfo tables and tested by timeStat/counter predicates below.
enum stats_flags_e {
  noTotal = 1 << 0, //!< do not show a TOTAL_aggregation for this statistic
  onlyInMaster = 1 << 1, //!< statistic is valid only for master
  noUnits = 1 << 2, //!< statistic doesn't need units printed next to it
  notInMaster = 1 << 3, //!< statistic is valid only for non-master threads
  logEvent = 1 << 4 //!< statistic can be logged on the event timeline when
  //! KMP_STATS_EVENTS is on (valid only for timers)
};
/*!
* @ingroup STATS_GATHERING
* \brief the states which a thread can be in
*
*/
// Mutually exclusive states a thread can be in while statistics are being
// gathered (tracked per thread in kmp_stats_list::state).
enum stats_state_e {
  IDLE,
  SERIAL_REGION,
  FORK_JOIN_BARRIER,
  PLAIN_BARRIER,
  TASKWAIT,
  TASKYIELD,
  TASKGROUP,
  IMPLICIT_TASK,
  EXPLICIT_TASK
};
/*!
* \brief Add new counters under KMP_FOREACH_COUNTER() macro in kmp_stats.h
*
* @param macro a user defined macro that takes three arguments -
* macro(COUNTER_NAME, flags, arg)
* @param arg a user defined argument to send to the user defined macro
*
* \details A counter counts the occurrence of some event. Each thread
* accumulates its own count, at the end of execution the counts are aggregated
* treating each thread as a separate measurement. (Unless onlyInMaster is set,
* in which case there's only a single measurement). The min,mean,max are
* therefore the values for the threads. Adding the counter here and then
* putting a KMP_BLOCK_COUNTER(name) at the point you want to count is all you
* need to do. All of the tables and printing is generated from this macro.
* Format is "macro(name, flags, arg)"
*
* @ingroup STATS_GATHERING
*/
// clang-format off
#define KMP_FOREACH_COUNTER(macro, arg) \
macro(OMP_PARALLEL,stats_flags_e::onlyInMaster|stats_flags_e::noTotal,arg) \
macro(OMP_NESTED_PARALLEL, 0, arg) \
macro(OMP_LOOP_STATIC, 0, arg) \
macro(OMP_LOOP_STATIC_STEAL, 0, arg) \
macro(OMP_LOOP_DYNAMIC, 0, arg) \
macro(OMP_DISTRIBUTE, 0, arg) \
macro(OMP_BARRIER, 0, arg) \
macro(OMP_CRITICAL, 0, arg) \
macro(OMP_SINGLE, 0, arg) \
macro(OMP_MASTER, 0, arg) \
macro(OMP_TEAMS, 0, arg) \
macro(OMP_set_lock, 0, arg) \
macro(OMP_test_lock, 0, arg) \
macro(REDUCE_wait, 0, arg) \
macro(REDUCE_nowait, 0, arg) \
macro(OMP_TASKYIELD, 0, arg) \
macro(OMP_TASKLOOP, 0, arg) \
macro(TASK_executed, 0, arg) \
macro(TASK_cancelled, 0, arg) \
macro(TASK_stolen, 0, arg)
// clang-format on
/*!
* \brief Add new timers under KMP_FOREACH_TIMER() macro in kmp_stats.h
*
* @param macro a user defined macro that takes three arguments -
* macro(TIMER_NAME, flags, arg)
* @param arg a user defined argument to send to the user defined macro
*
* \details A timer collects multiple samples of some count in each thread and
* then finally aggregates all of the samples from all of the threads. For most
* timers the printing code also provides an aggregation over the thread totals.
* These are printed as TOTAL_foo. The count is normally a time (in ticks),
* hence the name "timer". (But can be any value, so we use this for "number of
* arguments passed to fork" as well). For timers the threads are not
* significant, it's the individual observations that count, so the statistics
* are at that level. Format is "macro(name, flags, arg)"
*
* @ingroup STATS_GATHERING2
*/
// clang-format off
#define KMP_FOREACH_TIMER(macro, arg) \
macro (OMP_worker_thread_life, stats_flags_e::logEvent, arg) \
macro (OMP_parallel, stats_flags_e::logEvent, arg) \
macro (OMP_parallel_overhead, stats_flags_e::logEvent, arg) \
macro (OMP_loop_static, 0, arg) \
macro (OMP_loop_static_scheduling, 0, arg) \
macro (OMP_loop_dynamic, 0, arg) \
macro (OMP_loop_dynamic_scheduling, 0, arg) \
macro (OMP_critical, 0, arg) \
macro (OMP_critical_wait, 0, arg) \
macro (OMP_single, 0, arg) \
macro (OMP_master, 0, arg) \
macro (OMP_task_immediate, 0, arg) \
macro (OMP_task_taskwait, 0, arg) \
macro (OMP_task_taskyield, 0, arg) \
macro (OMP_task_taskgroup, 0, arg) \
macro (OMP_task_join_bar, 0, arg) \
macro (OMP_task_plain_bar, 0, arg) \
macro (OMP_taskloop_scheduling, 0, arg) \
macro (OMP_plain_barrier, stats_flags_e::logEvent, arg) \
macro (OMP_idle, stats_flags_e::logEvent, arg) \
macro (OMP_fork_barrier, stats_flags_e::logEvent, arg) \
macro (OMP_join_barrier, stats_flags_e::logEvent, arg) \
macro (OMP_serial, stats_flags_e::logEvent, arg) \
macro (OMP_set_numthreads, stats_flags_e::noUnits | stats_flags_e::noTotal, \
arg) \
macro (OMP_PARALLEL_args, stats_flags_e::noUnits | stats_flags_e::noTotal, \
arg) \
macro (OMP_loop_static_iterations, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
macro (OMP_loop_dynamic_iterations, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
KMP_FOREACH_DEVELOPER_TIMER(macro, arg)
// clang-format on
// OMP_worker_thread_life -- Time from thread becoming an OpenMP thread (either
// initializing OpenMP or being created by a master)
// until the thread is destroyed
// OMP_parallel -- Time thread spends executing work directly
// within a #pragma omp parallel
// OMP_parallel_overhead -- Time thread spends setting up a parallel region
// OMP_loop_static -- Time thread spends executing loop iterations from
// a statically scheduled loop
// OMP_loop_static_scheduling -- Time thread spends scheduling loop iterations
// from a statically scheduled loop
// OMP_loop_dynamic -- Time thread spends executing loop iterations from
// a dynamically scheduled loop
// OMP_loop_dynamic_scheduling -- Time thread spends scheduling loop iterations
// from a dynamically scheduled loop
// OMP_critical -- Time thread spends executing critical section
// OMP_critical_wait -- Time thread spends waiting to enter
// a critical section
// OMP_single -- Time spent executing a "single" region
// OMP_master -- Time spent executing a "master" region
// OMP_task_immediate -- Time spent executing non-deferred tasks
// OMP_task_taskwait -- Time spent executing tasks inside a taskwait
// construct
// OMP_task_taskyield -- Time spent executing tasks inside a taskyield
// construct
// OMP_task_taskgroup -- Time spent executing tasks inside a taskgroup
// construct
// OMP_task_join_bar -- Time spent executing tasks inside a join barrier
// OMP_task_plain_bar -- Time spent executing tasks inside a barrier
// construct
// OMP_taskloop_scheduling -- Time spent scheduling tasks inside a taskloop
// construct
// OMP_plain_barrier -- Time spent in a #pragma omp barrier construct or
// inside implicit barrier at end of worksharing
// construct
// OMP_idle -- Time worker threads spend waiting for next
// parallel region
// OMP_fork_barrier -- Time spent in the fork barrier surrounding a
// parallel region
// OMP_join_barrier -- Time spent in the join barrier surrounding a
// parallel region
// OMP_serial -- Time thread zero spends executing serial code
// OMP_set_numthreads -- Values passed to omp_set_num_threads
// OMP_PARALLEL_args -- Number of arguments passed to a parallel region
// OMP_loop_static_iterations -- Number of iterations thread is assigned for
// statically scheduled loops
// OMP_loop_dynamic_iterations -- Number of iterations thread is assigned for
// dynamically scheduled loops
#if (KMP_DEVELOPER_STATS)
// Timers which are of interest to runtime library developers, not end users.
// These have to be explicitly enabled in addition to the other stats.
// KMP_fork_barrier -- time in __kmp_fork_barrier
// KMP_join_barrier -- time in __kmp_join_barrier
// KMP_barrier -- time in __kmp_barrier
// KMP_end_split_barrier -- time in __kmp_end_split_barrier
// KMP_setup_icv_copy -- time in __kmp_setup_icv_copy
// KMP_icv_copy -- start/stop timer for any ICV copying
// KMP_linear_gather -- time in __kmp_linear_barrier_gather
// KMP_linear_release -- time in __kmp_linear_barrier_release
// KMP_tree_gather -- time in __kmp_tree_barrier_gather
// KMP_tree_release -- time in __kmp_tree_barrier_release
// KMP_hyper_gather -- time in __kmp_hyper_barrier_gather
// KMP_hyper_release -- time in __kmp_hyper_barrier_release
// clang-format off
#define KMP_FOREACH_DEVELOPER_TIMER(macro, arg) \
macro(KMP_fork_call, 0, arg) \
macro(KMP_join_call, 0, arg) \
macro(KMP_end_split_barrier, 0, arg) \
macro(KMP_hier_gather, 0, arg) \
macro(KMP_hier_release, 0, arg) \
macro(KMP_hyper_gather, 0, arg) \
macro(KMP_hyper_release, 0, arg) \
macro(KMP_linear_gather, 0, arg) \
macro(KMP_linear_release, 0, arg) \
macro(KMP_tree_gather, 0, arg) \
macro(KMP_tree_release, 0, arg) \
macro(USER_resume, 0, arg) \
macro(USER_suspend, 0, arg) \
macro(KMP_allocate_team, 0, arg) \
macro(KMP_setup_icv_copy, 0, arg) \
macro(USER_icv_copy, 0, arg) \
macro (FOR_static_steal_stolen, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
macro (FOR_static_steal_chunks, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg)
#else
#define KMP_FOREACH_DEVELOPER_TIMER(macro, arg)
#endif
// clang-format on
/*!
* \brief Add new explicit timers under KMP_FOREACH_EXPLICIT_TIMER() macro.
*
* @param macro a user defined macro that takes three arguments -
* macro(TIMER_NAME, flags, arg)
* @param arg a user defined argument to send to the user defined macro
*
* \warning YOU MUST HAVE THE SAME NAMED TIMER UNDER KMP_FOREACH_TIMER() OR ELSE
* BAD THINGS WILL HAPPEN!
*
* \details Explicit timers are ones where we need to allocate a timer itself
* (as well as the accumulated timing statistics). We allocate these on a
* per-thread basis, and explicitly start and stop them. Block timers just
* allocate the timer itself on the stack, and use the destructor to notice
* block exit; they don't need to be defined here. The name here should be the
* same as that of a timer above.
*
* @ingroup STATS_GATHERING
*/
#define KMP_FOREACH_EXPLICIT_TIMER(macro, arg) KMP_FOREACH_TIMER(macro, arg)
#define ENUMERATE(name, ignore, prefix) prefix##name,
enum timer_e { KMP_FOREACH_TIMER(ENUMERATE, TIMER_) TIMER_LAST };
enum explicit_timer_e {
KMP_FOREACH_EXPLICIT_TIMER(ENUMERATE, EXPLICIT_TIMER_) EXPLICIT_TIMER_LAST
};
enum counter_e { KMP_FOREACH_COUNTER(ENUMERATE, COUNTER_) COUNTER_LAST };
#undef ENUMERATE
/*
* A logarithmic histogram. It accumulates the number of values in each power of
* ten bin. So 1<=x<10, 10<=x<100, ...
* Mostly useful where we have some big outliers and want to see information
* about them.
*/
class logHistogram {
  enum {
    numBins = 31, /* Number of powers of 10. If this changes you need to change
                   * the initializer for binMax */
    /*
     * If you want to use this to analyse values that may be less than 1, (for
     * instance times in s), then the logOffset gives you negative powers.
     * In our case here, we're just looking at times in ticks, or counts, so we
     * can never see values with magnitude < 1 (other than zero), so we can set
     * it to 0. As above change the initializer if you change this.
     */
    logOffset = 0
  };
  uint32_t KMP_ALIGN_CACHE zeroCount; // samples that were exactly zero
  struct {
    uint32_t count; // number of samples that fell in this decade
    double total; // sum of those samples
  } bins[numBins];

  static double binMax[numBins]; // per-bin upper bounds (defined out of line)

#ifdef KMP_DEBUG
  uint64_t _total; // running count of all samples, for self-checking

  // Debug invariant: zeroCount plus the per-bin counts must equal _total.
  void check() const {
    uint64_t t = zeroCount;
    for (int i = 0; i < numBins; i++)
      t += bins[i].count;
    KMP_DEBUG_ASSERT(t == _total);
  }
#else
  void check() const {}
#endif

public:
  logHistogram() { reset(); }

  logHistogram(logHistogram const &o) {
    // Bug fix: zeroCount was not copied, leaving it uninitialized in the
    // copy; the debug check() on a copied histogram could then compare
    // garbage against _total and assert spuriously.
    zeroCount = o.zeroCount;
    for (int i = 0; i < numBins; i++)
      bins[i] = o.bins[i];
#ifdef KMP_DEBUG
    _total = o._total;
#endif
  }

  // Clear all counts and totals back to the freshly-constructed state.
  void reset() {
    zeroCount = 0;
    for (int i = 0; i < numBins; i++) {
      bins[i].count = 0;
      bins[i].total = 0;
    }
#ifdef KMP_DEBUG
    _total = 0;
#endif
  }
  // Accessors take a "user" bin number; logOffset shifts it to the array
  // index (a no-op while logOffset == 0).
  uint32_t count(int b) const { return bins[b + logOffset].count; }
  double total(int b) const { return bins[b + logOffset].total; }
  static uint32_t findBin(double sample); // decade bin for a sample (out of line)
  logHistogram &operator+=(logHistogram const &o) {
    zeroCount += o.zeroCount;
    for (int i = 0; i < numBins; i++) {
      bins[i].count += o.bins[i].count;
      bins[i].total += o.bins[i].total;
    }
#ifdef KMP_DEBUG
    _total += o._total;
    check();
#endif
    return *this;
  }
  void addSample(double sample);
  int minBin() const; // lowest occupied bin
  int maxBin() const; // highest occupied bin
  std::string format(char) const; // render for output, with unit character
};
// Online accumulator for one statistic: tracks min, max, mean and the
// second central moment (m2, via Welford-style updates in addSample, defined
// out of line), and optionally a logHistogram of the samples.
class statistic {
  double KMP_ALIGN_CACHE minVal;
  double maxVal;
  double meanVal;
  double m2; // sum of squared deviations from the mean
  uint64_t sampleCount;
  double offset; // subtracted from samples, see setOffset()
  bool collectingHist; // whether hist is being populated
  logHistogram hist;

public:
  statistic(bool doHist = bool(KMP_STATS_HIST)) {
    reset();
    collectingHist = doHist;
  }
  statistic(statistic const &o)
      : minVal(o.minVal), maxVal(o.maxVal), meanVal(o.meanVal), m2(o.m2),
        sampleCount(o.sampleCount), offset(o.offset),
        collectingHist(o.collectingHist), hist(o.hist) {}
  // Reconstruct a statistic from previously reduced values (sd = std dev).
  statistic(double minv, double maxv, double meanv, uint64_t sc, double sd)
      : minVal(minv), maxVal(maxv), meanVal(meanv), m2(sd * sd * sc),
        sampleCount(sc), offset(0.0), collectingHist(false) {}
  bool haveHist() const { return collectingHist; }
  double getMin() const { return minVal; }
  double getMean() const { return meanVal; }
  double getMax() const { return maxVal; }
  uint64_t getCount() const { return sampleCount; }
  // NOTE(review): with sampleCount == 0 this divides by zero and yields
  // NaN/inf; callers presumably only query populated statistics -- confirm.
  double getSD() const { return sqrt(m2 / sampleCount); }
  double getTotal() const { return sampleCount * meanVal; }
  logHistogram const *getHist() const { return &hist; }
  void setOffset(double d) { offset = d; }
  // Reset accumulators; note collectingHist is deliberately left unchanged.
  void reset() {
    minVal = std::numeric_limits<double>::max();
    maxVal = -minVal;
    meanVal = 0.0;
    m2 = 0.0;
    sampleCount = 0;
    offset = 0.0;
    hist.reset();
  }
  void addSample(double sample);
  void scale(double factor);
  void scaleDown(double f) { scale(1. / f); }
  void forceCount(uint64_t count) { sampleCount = count; }
  statistic &operator+=(statistic const &other);
  std::string format(char unit, bool total = false) const;
  std::string formatHist(char unit) const { return hist.format(unit); }
};
// Static description of one timer or counter: its printable name and a
// bitwise OR of stats_flags_e values.
struct statInfo {
  const char *name;
  uint32_t flags;
};
// A statistic for a timer, plus static access to the per-timer statInfo
// table (timerInfo[], defined out of line, indexed by timer_e).
class timeStat : public statistic {
  static statInfo timerInfo[];

public:
  timeStat() : statistic() {}
  static const char *name(timer_e e) { return timerInfo[e].name; }
  // Flag predicates for a given timer (see stats_flags_e).
  static bool noTotal(timer_e e) {
    return timerInfo[e].flags & stats_flags_e::noTotal;
  }
  static bool masterOnly(timer_e e) {
    return timerInfo[e].flags & stats_flags_e::onlyInMaster;
  }
  static bool workerOnly(timer_e e) {
    return timerInfo[e].flags & stats_flags_e::notInMaster;
  }
  static bool noUnits(timer_e e) {
    return timerInfo[e].flags & stats_flags_e::noUnits;
  }
  static bool logEvent(timer_e e) {
    return timerInfo[e].flags & stats_flags_e::logEvent;
  }
  // Strip the logEvent flag from every timer (used when KMP_STATS_EVENTS
  // is disabled at runtime).
  static void clearEventFlags() {
    for (int i = 0; i < TIMER_LAST; i++) {
      timerInfo[i].flags &= (~(stats_flags_e::logEvent));
    }
  }
};
// Where we need explicitly to start and end the timer, this version can be
// used. Since these timers normally aren't nicely scoped and don't have a good
// place to live on the stack of the thread, they're more work to use.
// An explicitly started/stopped timer bound to one timeStat.  start() and
// stop() are defined out of line; stop() presumably folds the elapsed time
// (minus accumulated pause time) into *stat -- confirm in kmp_stats.cpp.
class explicitTimer {
  timeStat *stat; // where the measured interval is accumulated
  timer_e timerEnumValue; // which timer this is (for event logging)
  tsc_tick_count startTime;
  tsc_tick_count pauseStartTime; // tick at which the current pause began
  tsc_tick_count::tsc_interval_t totalPauseTime; // accumulated paused ticks

public:
  explicitTimer(timeStat *s, timer_e te)
      : stat(s), timerEnumValue(te), startTime(), pauseStartTime(0),
        totalPauseTime() {}
  // void setStat(timeStat *s) { stat = s; }
  void start(tsc_tick_count tick);
  // pause()/resume() must be called in matched pairs; resume() adds the
  // paused interval to totalPauseTime.
  void pause(tsc_tick_count tick) { pauseStartTime = tick; }
  void resume(tsc_tick_count tick) {
    totalPauseTime += (tick - pauseStartTime);
  }
  void stop(tsc_tick_count tick, kmp_stats_list *stats_ptr = nullptr);
  void reset() {
    startTime = 0;
    pauseStartTime = 0;
    totalPauseTime = 0;
  }
  timer_e get_type() const { return timerEnumValue; }
};
// Where you need to partition a threads clock ticks into separate states
// e.g., a partitionedTimers class with two timers of EXECUTING_TASK, and
// DOING_NOTHING would render these conditions:
// time(EXECUTING_TASK) + time(DOING_NOTHING) = total time thread is alive
// No clock tick in the EXECUTING_TASK is a member of DOING_NOTHING and vice
// versa
// A stack of explicitTimers that partitions a thread's clock ticks into
// disjoint states (see the block comment above).  All operations are
// defined out of line in kmp_stats.cpp.
class partitionedTimers {
private:
  std::vector<explicitTimer> timer_stack;

public:
  partitionedTimers();
  void init(explicitTimer timer); // install the base (bottom-of-stack) timer
  void exchange(explicitTimer timer); // replace the top-of-stack timer
  void push(explicitTimer timer); // suspend current timer, start a new one
  void pop(); // stop top timer, resume the one below
  void windup(); // stop everything at shutdown
};
// Special wrapper around the partitioned timers to aid timing code blocks
// It avoids the need to have an explicit end, leaving the scope suffices.
// RAII helper: pushes a timer onto the partitionedTimers stack on
// construction and pops it when the enclosing scope exits, so no explicit
// end call is needed.
class blockPartitionedTimer {
  partitionedTimers *part_timers;

public:
  blockPartitionedTimer(partitionedTimers *pt, explicitTimer timer)
      : part_timers(pt) {
    part_timers->push(timer);
  }
  ~blockPartitionedTimer() { part_timers->pop(); }
};
// Special wrapper around the thread state to aid in keeping state in code
// blocks It avoids the need to have an explicit end, leaving the scope
// suffices.
// RAII helper: sets the thread's stats state for the duration of a scope
// and restores the previous state on scope exit.
class blockThreadState {
  stats_state_e *state_pointer; // the thread's state variable
  stats_state_e old_state; // value to restore in the destructor

public:
  blockThreadState(stats_state_e *thread_state_pointer, stats_state_e new_state)
      : state_pointer(thread_state_pointer), old_state(*thread_state_pointer) {
    *state_pointer = new_state;
  }
  ~blockThreadState() { *state_pointer = old_state; }
};
// If all you want is a count, then you can use this...
// The individual per-thread counts will be aggregated into a statistic at
// program exit.
// A simple per-thread event counter; per-thread values are aggregated into
// a statistic at program exit (see the comment above).  counterInfo[] is the
// out-of-line table of names/flags indexed by counter_e.
class counter {
  uint64_t value;
  static const statInfo counterInfo[];

public:
  counter() : value(0) {}
  void increment() { value++; }
  uint64_t getValue() const { return value; }
  void reset() { value = 0; }
  static const char *name(counter_e e) { return counterInfo[e].name; }
  static bool masterOnly(counter_e e) {
    return counterInfo[e].flags & stats_flags_e::onlyInMaster;
  }
};
/* ****************************************************************
Class to implement an event
There are four components to an event: start time, stop time
nest_level, and timer_name.
The start and stop time should be obvious (recorded in clock ticks).
The nest_level relates to the bar width in the timeline graph.
The timer_name is used to determine which timer event triggered this event.
the interface to this class is through four read-only operations:
1) getStart() -- returns the start time as 64 bit integer
2) getStop() -- returns the stop time as 64 bit integer
3) getNestLevel() -- returns the nest level of the event
4) getTimerName() -- returns the timer name that triggered event
*MORE ON NEST_LEVEL*
The nest level is used in the bar graph that represents the timeline.
Its main purpose is for showing how events are nested inside each other.
For example, say events, A, B, and C are recorded. If the timeline
looks like this:
Begin -------------------------------------------------------------> Time
| | | | | |
A B C C B A
start start start end end end
Then A, B, C will have a nest level of 1, 2, 3 respectively.
These values are then used to calculate the barwidth so you can
see that inside A, B has occurred, and inside B, C has occurred.
Currently, this is shown with A's bar width being larger than B's
bar width, and B's bar width being larger than C's bar width.
**************************************************************** */
// An immutable timeline event: start/stop tick counts, the nesting level
// (bar width in the timeline plot), and the timer that produced it.
class kmp_stats_event {
  uint64_t start;
  uint64_t stop;
  int nest_level;
  timer_e timer_name;

public:
  kmp_stats_event()
      : start(0), stop(0), nest_level(0), timer_name(TIMER_LAST) {}
  kmp_stats_event(uint64_t strt, uint64_t stp, int nst, timer_e nme)
      : start(strt), stop(stp), nest_level(nst), timer_name(nme) {}
  inline uint64_t getStart() const { return start; }
  inline uint64_t getStop() const { return stop; }
  inline int getNestLevel() const { return nest_level; }
  inline timer_e getTimerName() const { return timer_name; }
};
/* ****************************************************************
Class to implement a dynamically expandable array of events
---------------------------------------------------------
| event 1 | event 2 | event 3 | event 4 | ... | event N |
---------------------------------------------------------
An event is pushed onto the back of this array at every
explicitTimer->stop() call. The event records the thread #,
start time, stop time, and nest level related to the bar width.
The event vector starts at size INIT_SIZE and grows (doubles in size)
if needed. An implication of this behavior is that log(N)
reallocations are needed (where N is number of events). If you want
to avoid reallocations, then set INIT_SIZE to a large value.
the interface to this class is through six operations:
1) reset() -- sets the internal_size back to 0 but does not deallocate any
memory
2) size() -- returns the number of valid elements in the vector
3) push_back(start, stop, nest, timer_name) -- pushes an event onto
the back of the array
4) deallocate() -- frees all memory associated with the vector
5) sort() -- sorts the vector by start time
6) operator[index] or at(index) -- returns event reference at that index
**************************************************************** */
// Dynamically growing (doubling) array of kmp_stats_event, allocated with
// the runtime's __kmp_allocate/__kmp_free.  See the block comment above for
// the interface.  NOTE: the destructor intentionally frees nothing; storage
// is released only by an explicit deallocate() call.
class kmp_stats_event_vector {
  kmp_stats_event *events;
  int internal_size; // number of valid elements
  int allocated_size; // current capacity
  static const int INIT_SIZE = 1024;

public:
  kmp_stats_event_vector() {
    events =
        (kmp_stats_event *)__kmp_allocate(sizeof(kmp_stats_event) * INIT_SIZE);
    internal_size = 0;
    allocated_size = INIT_SIZE;
  }
  ~kmp_stats_event_vector() {}
  inline void reset() { internal_size = 0; }
  inline int size() const { return internal_size; }
  // Append one event, doubling the backing array when it is full.
  void push_back(uint64_t start_time, uint64_t stop_time, int nest_level,
                 timer_e name) {
    int i;
    if (internal_size == allocated_size) {
      kmp_stats_event *tmp = (kmp_stats_event *)__kmp_allocate(
          sizeof(kmp_stats_event) * allocated_size * 2);
      for (i = 0; i < internal_size; i++)
        tmp[i] = events[i];
      __kmp_free(events);
      events = tmp;
      allocated_size *= 2;
    }
    events[internal_size] =
        kmp_stats_event(start_time, stop_time, nest_level, name);
    internal_size++;
    return;
  }
  void deallocate(); // frees the backing array (out of line)
  void sort(); // sorts events by start time (out of line)
  // Unchecked element access, const and non-const.
  const kmp_stats_event &operator[](int index) const { return events[index]; }
  kmp_stats_event &operator[](int index) { return events[index]; }
  const kmp_stats_event &at(int index) const { return events[index]; }
  kmp_stats_event &at(int index) { return events[index]; }
};
/* ****************************************************************
Class to implement a doubly-linked, circular, statistics list
|---| ---> |---| ---> |---| ---> |---| ---> ... next
| | | | | | | |
|---| <--- |---| <--- |---| <--- |---| <--- ... prev
Sentinel first second third
Node node node node
The Sentinel Node is the user handle on the list.
The first node corresponds to thread 0's statistics.
The second node corresponds to thread 1's statistics and so on...
Each node has a _timers, _counters, and _explicitTimers array to hold that
thread's statistics. The _explicitTimers point to the correct _timer and
update its statistics at every stop() call. The explicitTimers' pointers are
set up in the constructor. Each node also has an event vector to hold that
thread's timing events. The event vector expands as necessary and records
the start-stop times for each timer.
The nestLevel variable is for plotting events and is related
to the bar width in the timeline graph.
Every thread will have a thread local pointer to its node in
the list. The sentinel node is used by the master thread to
store "dummy" statistics before __kmp_create_worker() is called.
**************************************************************** */
// One node of the doubly-linked, circular, per-thread statistics list (see
// the block comment above).  Holds the thread's timers, counters, event
// vector, current state, and the links to its neighbours; a default-
// constructed node links to itself and serves as the sentinel.
class kmp_stats_list {
  int gtid; // global thread id this node belongs to
  timeStat _timers[TIMER_LAST + 1];
  counter _counters[COUNTER_LAST + 1];
  explicitTimer thread_life_timer; // whole-lifetime timer for this thread
  partitionedTimers _partitionedTimers;
  int _nestLevel; // one per thread
  kmp_stats_event_vector _event_vector;
  kmp_stats_list *next;
  kmp_stats_list *prev;
  stats_state_e state; // current stats state of the thread
  int thread_is_idle_flag;

public:
  kmp_stats_list()
      : thread_life_timer(&_timers[TIMER_OMP_worker_thread_life],
                          TIMER_OMP_worker_thread_life),
        _nestLevel(0), _event_vector(), next(this), prev(this), state(IDLE),
        thread_is_idle_flag(0) {}
  ~kmp_stats_list() {}
  inline timeStat *getTimer(timer_e idx) { return &_timers[idx]; }
  inline counter *getCounter(counter_e idx) { return &_counters[idx]; }
  inline partitionedTimers *getPartitionedTimers() {
    return &_partitionedTimers;
  }
  inline timeStat *getTimers() { return _timers; }
  inline counter *getCounters() { return _counters; }
  inline kmp_stats_event_vector &getEventVector() { return _event_vector; }
  // Start/stop the whole-lifetime timer for this thread.
  inline void startLife() { thread_life_timer.start(tsc_tick_count::now()); }
  inline void endLife() { thread_life_timer.stop(tsc_tick_count::now(), this); }
  inline void resetEventVector() { _event_vector.reset(); }
  inline void incrementNestValue() { _nestLevel++; }
  inline int getNestValue() { return _nestLevel; }
  inline void decrementNestValue() { _nestLevel--; }
  inline int getGtid() const { return gtid; }
  inline void setGtid(int newgtid) { gtid = newgtid; }
  inline void setState(stats_state_e newstate) { state = newstate; }
  inline stats_state_e getState() const { return state; }
  inline stats_state_e *getStatePointer() { return &state; }
  inline bool isIdle() { return thread_is_idle_flag == 1; }
  inline void setIdleFlag() { thread_is_idle_flag = 1; }
  inline void resetIdleFlag() { thread_is_idle_flag = 0; }
  kmp_stats_list *push_back(int gtid); // returns newly created list node
  inline void push_event(uint64_t start_time, uint64_t stop_time,
                         int nest_level, timer_e name) {
    _event_vector.push_back(start_time, stop_time, nest_level, name);
  }
  void deallocate();
  // Minimal forward iterator over the circular list (out-of-line methods).
  class iterator;
  kmp_stats_list::iterator begin();
  kmp_stats_list::iterator end();
  int size();
  class iterator {
    kmp_stats_list *ptr;
    friend kmp_stats_list::iterator kmp_stats_list::begin();
    friend kmp_stats_list::iterator kmp_stats_list::end();

  public:
    iterator();
    ~iterator();
    iterator operator++();
    iterator operator++(int dummy);
    iterator operator--();
    iterator operator--(int dummy);
    bool operator!=(const iterator &rhs);
    bool operator==(const iterator &rhs);
    kmp_stats_list *operator*() const; // dereference operator
  };
};
/* ****************************************************************
Class to encapsulate all output functions and the environment variables
This module holds filenames for various outputs (normal stats, events, plot
file), as well as coloring information for the plot file.
The filenames and flags variables are read from environment variables.
These are read once by the constructor of the global variable
__kmp_stats_output which calls init().
During this init() call, event flags for the timeStat::timerInfo[] global
array are cleared if KMP_STATS_EVENTS is not true (on, 1, yes).
The only interface function that is public is outputStats(heading). This
function should print out everything it needs to, either to files or stderr,
depending on the environment variables described below
ENVIRONMENT VARIABLES:
KMP_STATS_FILE -- if set, all statistics (not events) will be printed to this
file, otherwise, print to stderr
KMP_STATS_THREADS -- if set to "on", then will print per thread statistics to
either KMP_STATS_FILE or stderr
KMP_STATS_PLOT_FILE -- if set, print the ploticus plot file to this filename,
otherwise, the plot file is sent to "events.plt"
KMP_STATS_EVENTS -- if set to "on", then log events, otherwise, don't log
events
KMP_STATS_EVENTS_FILE -- if set, all events are outputted to this file,
otherwise, output is sent to "events.dat"
**************************************************************** */
// Encapsulates all stats output.  Filenames, flags and plot colors are read
// from the KMP_STATS_* environment variables (see the comment block above)
// by init(), which runs once from the constructor of the global
// __kmp_stats_output.
class kmp_stats_output_module {
public:
  // RGB triple used to color a timer in the ploticus plot file.
  struct rgb_color {
    float r;
    float g;
    float b;
  };
private:
  std::string outputFileName;
  static const char *eventsFileName;
  static const char *plotFileName;
  static int printPerThreadFlag;
  static int printPerThreadEventsFlag;
  static const rgb_color globalColorArray[];
  static rgb_color timerColorInfo[];
  // Reads the environment variables and initializes the members above.
  void init();
  static void setupEventColors();
  static void printPloticusFile();
  static void printHeaderInfo(FILE *statsOut);
  static void printTimerStats(FILE *statsOut, statistic const *theStats,
                              statistic const *totalStats);
  static void printCounterStats(FILE *statsOut, statistic const *theStats);
  static void printCounters(FILE *statsOut, counter const *theCounters);
  static void printEvents(FILE *eventsOut, kmp_stats_event_vector *theEvents,
                          int gtid);
  static rgb_color getEventColor(timer_e e) { return timerColorInfo[e]; }
  static void windupExplicitTimers();
  bool eventPrintingEnabled() const { return printPerThreadEventsFlag; }
public:
  kmp_stats_output_module() { init(); }
  // The only public interface function: prints everything it needs to,
  // either to files or stderr (see the class comment).
  void outputStats(const char *heading);
};
#ifdef __cplusplus
extern "C" {
#endif
// One-time setup / teardown of the statistics subsystem.
void __kmp_stats_init();
void __kmp_stats_fini();
// Reset all counters and timers for all threads.
void __kmp_reset_stats();
// Print accumulated statistics under the given heading.
void __kmp_output_stats(const char *);
// Accumulate per-thread statistics at process exit.
void __kmp_accumulate_stats_at_exit(void);
// thread local pointer to stats node within list
extern KMP_THREAD_LOCAL kmp_stats_list *__kmp_stats_thread_ptr;
// head to stats list.
extern kmp_stats_list *__kmp_stats_list;
// lock for __kmp_stats_list
extern kmp_tas_lock_t __kmp_stats_lock;
// reference start time
extern tsc_tick_count __kmp_stats_start_time;
// interface to output
extern kmp_stats_output_module __kmp_stats_output;
#ifdef __cplusplus
}
#endif
// Simple, standard interfaces that drop out completely if stats aren't enabled
/*!
 * \brief Adds value to specified timer (name).
 *
 * @param name timer name as specified under the KMP_FOREACH_TIMER() macro
 * @param value double precision sample value to add to statistics for the timer
 *
 * \details Use KMP_COUNT_VALUE(name, value) macro to add a particular value to
 * a timer statistics.
 *
 * @ingroup STATS_GATHERING
 */
#define KMP_COUNT_VALUE(name, value) \
  __kmp_stats_thread_ptr->getTimer(TIMER_##name)->addSample(value)
/*!
 * \brief Increments specified counter (name).
 *
 * @param name counter name as specified under the KMP_FOREACH_COUNTER() macro
 *
 * \details Use KMP_COUNT_BLOCK(name) macro to increment a statistics
 * counter for the executing thread.
 *
 * @ingroup STATS_GATHERING
 */
#define KMP_COUNT_BLOCK(name) \
  __kmp_stats_thread_ptr->getCounter(COUNTER_##name)->increment()
/*!
 * \brief Outputs the current thread statistics and reset them.
 *
 * @param heading_string heading put above the final stats output
 *
 * \details Explicitly stops all timers and outputs all stats. Environment
 * variable, `KMP_STATS_FILE=filename`, can be used to output the stats to a
 * filename instead of stderr (see the environment-variable summary above).
 * Environment variable, `KMP_STATS_THREADS`, can be used to output thread
 * specific stats: when it is defined, thread specific stats are printed;
 * when it is undefined (not specified in the environment) thread specific
 * stats won't be printed. It should be noted that all statistics are reset
 * when this macro is called.
 *
 * @ingroup STATS_GATHERING
 */
#define KMP_OUTPUT_STATS(heading_string) __kmp_output_stats(heading_string)
/*!
 * \brief Initializes the partitioned timers to begin with name.
 *
 * @param name timer which you want this thread to begin with
 *
 * @ingroup STATS_GATHERING
 */
#define KMP_INIT_PARTITIONED_TIMERS(name) \
  __kmp_stats_thread_ptr->getPartitionedTimers()->init(explicitTimer( \
      __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name))
// Time the enclosing lexical scope as a partitioned timer (RAII object).
#define KMP_TIME_PARTITIONED_BLOCK(name) \
  blockPartitionedTimer __PBLOCKTIME__( \
      __kmp_stats_thread_ptr->getPartitionedTimers(), \
      explicitTimer(__kmp_stats_thread_ptr->getTimer(TIMER_##name), \
                    TIMER_##name))
// Manually push/pop/exchange the current partitioned timer.
#define KMP_PUSH_PARTITIONED_TIMER(name) \
  __kmp_stats_thread_ptr->getPartitionedTimers()->push(explicitTimer( \
      __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name))
#define KMP_POP_PARTITIONED_TIMER() \
  __kmp_stats_thread_ptr->getPartitionedTimers()->pop()
#define KMP_EXCHANGE_PARTITIONED_TIMER(name) \
  __kmp_stats_thread_ptr->getPartitionedTimers()->exchange(explicitTimer( \
      __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name))
// Set/get the executing thread's current state (see stats_state_e).
#define KMP_SET_THREAD_STATE(state_name) \
  __kmp_stats_thread_ptr->setState(state_name)
#define KMP_GET_THREAD_STATE() __kmp_stats_thread_ptr->getState()
// RAII: set the thread state for the lexical scope, restore on scope exit.
#define KMP_SET_THREAD_STATE_BLOCK(state_name) \
  blockThreadState __BTHREADSTATE__(__kmp_stats_thread_ptr->getStatePointer(), \
                                    state_name)
/*!
 * \brief resets all stats (counters to 0, timers to 0 elapsed ticks)
 *
 * \details Reset all stats for all threads.
 *
 * @ingroup STATS_GATHERING
 */
#define KMP_RESET_STATS() __kmp_reset_stats()
#if (KMP_DEVELOPER_STATS)
// Developer-only stats forward to the regular macros when enabled.
#define KMP_TIME_DEVELOPER_BLOCK(n) KMP_TIME_BLOCK(n)
#define KMP_COUNT_DEVELOPER_VALUE(n, v) KMP_COUNT_VALUE(n, v)
#define KMP_COUNT_DEVELOPER_BLOCK(n) KMP_COUNT_BLOCK(n)
#define KMP_START_DEVELOPER_EXPLICIT_TIMER(n) KMP_START_EXPLICIT_TIMER(n)
#define KMP_STOP_DEVELOPER_EXPLICIT_TIMER(n) KMP_STOP_EXPLICIT_TIMER(n)
#define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) KMP_TIME_PARTITIONED_BLOCK(n)
#else
// Null definitions
#define KMP_TIME_DEVELOPER_BLOCK(n) ((void)0)
#define KMP_COUNT_DEVELOPER_VALUE(n, v) ((void)0)
#define KMP_COUNT_DEVELOPER_BLOCK(n) ((void)0)
#define KMP_START_DEVELOPER_EXPLICIT_TIMER(n) ((void)0)
#define KMP_STOP_DEVELOPER_EXPLICIT_TIMER(n) ((void)0)
#define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) ((void)0)
#endif
#else // KMP_STATS_ENABLED
// Null definitions: with stats disabled every macro compiles away to nothing.
#define KMP_TIME_BLOCK(n) ((void)0)
#define KMP_COUNT_VALUE(n, v) ((void)0)
#define KMP_COUNT_BLOCK(n) ((void)0)
#define KMP_START_EXPLICIT_TIMER(n) ((void)0)
#define KMP_STOP_EXPLICIT_TIMER(n) ((void)0)
#define KMP_OUTPUT_STATS(heading_string) ((void)0)
#define KMP_RESET_STATS() ((void)0)
#define KMP_TIME_DEVELOPER_BLOCK(n) ((void)0)
#define KMP_COUNT_DEVELOPER_VALUE(n, v) ((void)0)
#define KMP_COUNT_DEVELOPER_BLOCK(n) ((void)0)
#define KMP_START_DEVELOPER_EXPLICIT_TIMER(n) ((void)0)
#define KMP_STOP_DEVELOPER_EXPLICIT_TIMER(n) ((void)0)
#define KMP_INIT_PARTITIONED_TIMERS(name) ((void)0)
#define KMP_TIME_PARTITIONED_BLOCK(name) ((void)0)
#define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) ((void)0)
#define KMP_PUSH_PARTITIONED_TIMER(name) ((void)0)
#define KMP_POP_PARTITIONED_TIMER() ((void)0)
#define KMP_SET_THREAD_STATE(state_name) ((void)0)
#define KMP_GET_THREAD_STATE() ((void)0)
#define KMP_SET_THREAD_STATE_BLOCK(state_name) ((void)0)
#endif // KMP_STATS_ENABLED
#endif // KMP_STATS_H
|
CPUVF2.h | #ifndef __CPU_VF2_H__
#define __CPU_VF2_H__
#include "CPUFilter.h"
#include "CPUGraph.h"
#include "CPUIntersection.h"
#include "CPUPatternMatch.h"
#include "TimeMeasurer.h"
#if defined(OPENMP)
#include <omp.h>
#endif
#include <unordered_map>
// CPU backtracking matcher in the style of VF2 for subgraph matching.
// The search is parallelized over the first data vertex with OpenMP;
// each thread keeps its own partial path and a multiset of data vertices
// adjacent to that path.
class CPUVF2 : public CPUPatternMatch {
 public:
  CPUVF2(TraversalPlan *plan, Graph *graph, size_t thread_num)
      : CPUPatternMatch(thread_num), plan_(plan), graph_(graph) {}
  virtual ~CPUVF2() {}
  virtual void Execute() {
    // intersect_levels[l]: already-matched pattern vertices adjacent to
    // level l (their adjacency lists are intersected to form candidates).
    std::vector<std::vector<uintV> > intersect_levels;
    plan_->GetOrderedConnectivity(intersect_levels);
    AllCondType conditions;
    plan_->GetOrderedOrdering(conditions);
    // For each level, count its pattern neighbors that come LATER in the
    // matching order, split by whether that later neighbor also touches an
    // earlier level (old) or not (new).  Used by pruning rules 2 and 3.
    std::vector<uintV> levels_new_nghrs(plan_->GetVertexCount());
    std::vector<uintV> levels_old_nghrs(plan_->GetVertexCount());
    auto &con = plan_->GetConnectivity();
    for (uintV level = 0; level < plan_->GetVertexCount(); ++level) {
      levels_old_nghrs[level] = 0;
      levels_new_nghrs[level] = 0;
      for (size_t pred_id = 0; pred_id < con[level].size(); ++pred_id) {
        auto l = con[level][pred_id];
        if (l > level) {
          bool in_neighborhood = false;
          for (size_t j = 0; j < con[l].size(); ++j) {
            if (con[l][j] < level) {
              in_neighborhood = true;
            }
          }
          if (in_neighborhood)
            levels_old_nghrs[level]++;
          else
            levels_new_nghrs[level]++;
        }
      }
    }
    // Per-thread scratch: the current partial path, and for each data
    // vertex the number of path vertices it is adjacent to.
    uintV **paths = new uintV *[thread_num_];
    std::unordered_map<uintV, uintV> *in_neighborhood =
        new std::unordered_map<uintV, uintV>[thread_num_];
    for (size_t i = 0; i < thread_num_; ++i) {
      paths[i] = new uintV[plan_->GetVertexCount()];
    }
    omp_set_num_threads(thread_num_);
    TimeMeasurer timer;
    timer.StartTimer();
    long long total_match_count = 0;
    // One DFS tree per starting data vertex u; per-vertex counts are
    // combined through the reduction.
#pragma omp parallel for schedule(dynamic) reduction(+ : total_match_count)
    for (uintV u = 0; u < graph_->GetVertexCount(); ++u) {
      long long ans = 0;
      size_t thread_id = omp_get_thread_num();
#if defined(PROFILE)
      TimeMeasurer timer2;
      timer2.StartTimer();
#endif
      in_neighborhood[thread_id].clear();
      paths[thread_id][0] = u;
      UpdateNeighborHood(u, in_neighborhood[thread_id]);
      DFS(thread_id, 1, paths[thread_id], ans, intersect_levels, conditions,
          levels_old_nghrs, levels_new_nghrs, in_neighborhood[thread_id]);
      Backtrack(u, in_neighborhood[thread_id]);
#if defined(PROFILE)
      timer2.EndTimer();
      thread_time_[thread_id] += timer2.GetElapsedMicroSeconds();
#endif
      total_match_count += ans;
    }
    timer.EndTimer();
    this->SetTotalMatchCount(total_match_count);
    std::cout << "total_match_count=" << total_match_count
              << ", elapsed_time=" << timer.GetElapsedMicroSeconds() / 1000.0
              << "ms" << std::endl;
    for (size_t i = 0; i < thread_num_; ++i) {
      delete[] paths[i];
      paths[i] = NULL;
    }
    delete[] paths;
    paths = NULL;
    delete[] in_neighborhood;
    in_neighborhood = NULL;
#if defined(PROFILE)
    for (size_t thread_id = 0; thread_id < thread_num_; ++thread_id) {
      std::cout << "thread_id=" << thread_id
                << ",time=" << thread_time_[thread_id] / 1000.0 << "ms"
                << std::endl;
    }
#endif
  }
 private:
  // Recursively extend the partial match at cur_level; `ans` is
  // incremented once per complete embedding found below this node.
  // `in_neighborhood` is updated/rolled back around each recursive call.
  void DFS(size_t thread_id, uintV cur_level, uintV *path,
           long long &ans,
           std::vector<std::vector<uintV>> &intersect_levels,
           AllCondType &conditions, std::vector<uintV> &levels_old_nghrs,
           std::vector<uintV> &levels_new_nghrs,
           std::unordered_map<uintV, uintV> &in_neighborhood) {
    if (cur_level == plan_->GetVertexCount()) {
      ans++;
      return;
    }
    if (intersect_levels[cur_level].size() == 0) {
      // No matched pattern neighbor constrains this level: scan every
      // data vertex, filtered only by ordering and distinctness checks.
      for (uintV i = 0; i < graph_->GetVertexCount(); ++i) {
        if (CheckCondition(path, i, conditions[cur_level]) == false ||
            CheckEquality(path, cur_level, i))
          continue;
        path[cur_level] = i;
        UpdateNeighborHood(i, in_neighborhood);
        DFS(thread_id, cur_level + 1, path, ans, intersect_levels, conditions,
            levels_old_nghrs, levels_new_nghrs, in_neighborhood);
        Backtrack(i, in_neighborhood);
      }
    } else {
      // 3 rules described in "An in-depth comparison of
      // subgraph isomorphism algorithms in graph databases"
      // rule 1: connectivity
      auto row_ptrs = graph_->GetRowPtrs();
      auto cols = graph_->GetCols();
      // Ping-pong buffers holding the running adjacency-list intersection.
      std::vector<uintV> res[2];
      for (size_t j = 0; j < intersect_levels[cur_level].size(); ++j) {
        auto p2 = intersect_levels[cur_level][j];
        auto first = path[p2];
        auto first_begin = &cols[row_ptrs[first]];
        auto first_end = &cols[row_ptrs[first + 1]];
        if (j == 0) {
          res[j % 2].assign(first_begin, first_end);
        } else {
          size_t max_size = std::min((size_t)(first_end - first_begin),
                                     res[(j + 1) % 2].size());
          res[j % 2].resize(max_size);
          size_t res_size = SortedIntersection(
              first_begin, first_end, res[(j + 1) % 2].begin(),
              res[(j + 1) % 2].end(), res[j % 2].begin());
          assert(res_size <= max_size);
          res[j % 2].resize(res_size);
        }
      }
      // The final intersection lives in the buffer written last.
      std::vector<uintV> &candidates =
          res[(intersect_levels[cur_level].size() + 1) % 2];
      for (size_t i = 0; i < candidates.size(); ++i) {
        if (CheckCondition(path, candidates[i], conditions[cur_level]) ==
                false ||
            CheckEquality(path, cur_level, candidates[i]))
          continue;
        size_t covered_count = 0, uncovered_count = 0;
        GetCoveredNeighborCount(candidates[i], path, cur_level, in_neighborhood,
                                covered_count, uncovered_count);
        // rule 2: #data vertices in the neighborhood path >=
        // #query vertices in the neighborhood of matched levels
        if (covered_count < levels_old_nghrs[cur_level]) continue;
        // rule 3: #the neighbors of candidates[i] that are not matched
        // or neighbors of matched data vertices >=
        // #the neighbors of cur_level that are not matched or neighbors of
        // matched query vertices
        if (covered_count + uncovered_count - levels_old_nghrs[cur_level] <
            levels_new_nghrs[cur_level])
          continue;
        path[cur_level] = candidates[i];
        UpdateNeighborHood(candidates[i], in_neighborhood);
        DFS(thread_id, cur_level + 1, path, ans, intersect_levels, conditions,
            levels_old_nghrs, levels_new_nghrs, in_neighborhood);
        Backtrack(candidates[i], in_neighborhood);
      }
    }
  }
  // Classify the neighbors of v that are NOT on the current path:
  // covered = also adjacent to some path vertex, uncovered = the rest.
  void GetCoveredNeighborCount(uintV v, uintV *path, size_t path_length,
                               std::unordered_map<uintV, uintV> &in_neighborhood,
                               size_t &covered_count,
                               size_t &uncovered_count) {
    covered_count = 0;
    uncovered_count = 0;
    auto row_ptrs = graph_->GetRowPtrs();
    auto cols = graph_->GetCols();
    for (auto j = row_ptrs[v]; j < row_ptrs[v + 1]; ++j) {
      auto nv = cols[j];
      bool inpath = false;
      for (size_t l = 0; l < path_length; ++l) {
        if (path[l] == nv) {
          inpath = true;
          break;
        }
      }
      if (!inpath) {
        if (in_neighborhood.count(nv) && in_neighborhood[nv] > 0)
          covered_count++;
        else
          uncovered_count++;
      }
    }
  }
  void UpdateNeighborHood(
      uintV v, std::unordered_map<uintV, uintV> &in_neighborhood) {
    // in_neighborhood maintains the set of vertices connected with
    // the data vertices in the path (value = how many path vertices).
    auto row_ptrs = graph_->GetRowPtrs();
    auto cols = graph_->GetCols();
    for (uintV j = row_ptrs[v]; j < row_ptrs[v + 1]; ++j) {
      auto nv = cols[j];
      if (in_neighborhood.count(nv) == 0) in_neighborhood[nv] = 0;
      in_neighborhood[nv] += 1;
    }
  }
  // Exact inverse of UpdateNeighborHood(v, ...): decrement every count.
  void Backtrack(uintV v,
                 std::unordered_map<uintV, uintV> &in_neighborhood) {
    auto row_ptrs = graph_->GetRowPtrs();
    auto cols = graph_->GetCols();
    for (uintV j = row_ptrs[v]; j < row_ptrs[v + 1]; ++j) {
      auto nv = cols[j];
      in_neighborhood[nv] -= 1;
    }
  }
 private:
  TraversalPlan *plan_;
  Graph *graph_;
};
#endif
|
ray.h | #ifndef RAY_H_
#define RAY_H_
#include <cmath>
#include <cfloat>
#include <embree2/rtcore_ray.h>
#include "../math/vector3.h"
#include "triangle.h"
#include "../utils/macros.h"
#include "material.h"
/*!
 * \struct Ray
 * \brief Extension of the Embree ray with additional payload.
 *
 * \f$\mathbf{r}(t) = O + \hat{\mathbf{d}}t\f$
 *
 * \author Tomáš Fabián
 * \version 0.8
 * \date 2015
 */
struct Ray : RTCRay {
  float transparency;      // payload carried along with the Embree ray
  float envIor = IOR_AIR;  // index of refraction of the surrounding medium
  Ray(const Vector3 &origin, Vector3 direction, const float t_near = 0.0f, const float t_far = FLT_MAX) {
    org[0] = origin.x;
    org[1] = origin.y;
    org[2] = origin.z;
    direction.Normalize(); // Embree does not normalize the direction itself
    dir[0] = direction.x;
    dir[1] = direction.y;
    dir[2] = direction.z;
    tnear = t_near;
    tfar = t_far;
    geomID = RTC_INVALID_GEOMETRY_ID;
    primID = RTC_INVALID_GEOMETRY_ID;
    instID = RTC_INVALID_GEOMETRY_ID;
    mask = 0xFFFFFFFF;
    time = 0.0f;
    // --- payload ---
    // NOTE(review): 3.14f looks like a debug/sentinel default rather than a
    // physically meaningful transparency — confirm intended initial value.
    transparency = 3.14f;
  }
  // Point on the ray at the nearest hit distance (tfar after traversal).
  Vector3 getIntersectPoint() {
    return this->eval(this->tfar);
  }
  // Normalized copy of the ray direction.
  Vector3 getDir() {
    Vector3 dir = this->dir;
    dir.Normalize();
    return dir;
  }
  // Normalized, flipped geometric normal at the hit (-Ng).
  Vector3 getNormal() {
    Vector3 normal = -Vector3(this->Ng);
    normal.Normalize();
    return normal;
  }
  // Evaluate r(t) = O + d*t.
  Vector3 eval(const float t) const {
    return Vector3(
        org[0] + dir[0] * t,
        org[1] + dir[1] * t,
        org[2] + dir[2] * t);
  }
  // True when traversal recorded a hit (geomID was overwritten by Embree).
  bool hasCollided() {
    return this->geomID != RTC_INVALID_GEOMETRY_ID;
  }
};
/*!
 * \struct RayOld
 * \brief Legacy (non-Embree) ray.
 *
 * \f$\mathbf{r}(t) = O + \hat{\mathbf{d}}t\f$
 *
 * \author Tomáš Fabián
 * \version 1.0
 * \date 2011-2013
 */
struct RayOld {
  Vector3 origin;         /*!< Ray origin. */
  Vector3 direction;      /*!< Direction vector (unit length). */
  Vector3 inv_direction;  /*!< Component-wise inverted direction vector. */
  float t;                /*!< Real ray parameter \f$t\f$ of the nearest hit. */
  Triangle *target;       /*!< Pointer to the hit triangle. */
  float env_ior;          /*!< Refraction index of the medium the ray currently travels through. */
  char direction_sign[3]; /*!< Sign of the direction vector per axis. */
  //! Specialized constructor.
  /*!
   * Initializes a ray from the given values, extending to infinity.
   * The origin is offset by `bounce` along the direction to avoid
   * self-intersection.
   *
   * \param origin origin point.
   * \param direction unit direction.
   */
  RayOld(const Vector3 &origin, const Vector3 &direction, const float bounce = EPSILON,
         const float env_ior = IOR_AIR) {
    this->origin = origin;
    set_direction(direction);
    this->origin += bounce * this->direction;
    t = REAL_MAX;
    target = NULL;
    this->env_ior = env_ior;
    // global ray counter shared by all threads
#pragma omp atomic
    ++no_rays_;
  }
  //! Computes \f$\mathbf{r}(t)\f$.
  //! \return Coordinates of the point on the ray for the given parameter \f$t\f$.
  Vector3 eval(const float t) {
    return origin + direction * t;
  }
  //! Records a candidate hit.
  //! \return True if \a t is positive and closer than the current nearest hit.
  bool closest_hit(const float t, Triangle *const triangle) {
    bool hit_confirmed = false;
    //#pragma omp critical ( make_hit )
    {
      if ((t < this->t) && (t > 0)) {
        this->t = t;
        hit_confirmed = true;
        target = triangle;
      }
    }
    return hit_confirmed;
  }
  // True when a valid hit has been recorded.
  bool is_hit() const {
    return ((t > 0) && (t < REAL_MAX) && (target != NULL));
  }
  // Normalizes and stores the direction, caching its inverse and signs
  // (used by slab-style box tests).
  void set_direction(const Vector3 &direction) {
    this->direction = direction;
    this->direction.Normalize();
    inv_direction = Vector3(1 / this->direction.x, 1 / this->direction.y, 1 / this->direction.z);
    direction_sign[0] = static_cast<char>( inv_direction.x < 0 ); // 0 for [0,+inf) and 1 for (-inf,0)
    direction_sign[1] = static_cast<char>( inv_direction.y < 0 );
    direction_sign[2] = static_cast<char>( inv_direction.z < 0 );
  }
  static void no_rays_reset() {
    no_rays_ = 0;
  }
  static long long no_rays() {
    return no_rays_;
  }
private:
  static long long no_rays_; // total number of rays constructed so far
};
#endif
|
hellOMP.c | #include <omp.h>
#include <stdio.h>
/* Spawn a fixed-size OpenMP team; every member prints a greeting with
 * its own thread id. */
int main() {
    const int requested_threads = 10; /* team size requested below */
    omp_set_num_threads(requested_threads);
#pragma omp parallel
    {
        printf("Hello world from thread %d\n", omp_get_thread_num());
    }
    return 0;
}
|
5817.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Fill A with the deterministic pattern A[i][j] = (i + j) / nj,
 * so the benchmark input is reproducible across runs. */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  for (int row = 0; row < ni; row++) {
    for (int col = 0; col < nj; col++) {
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump every element of B to stderr (DCE prevention / output check),
 * inserting a newline every 20 values. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  for (int i = 0; i < ni; i++) {
    for (int j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      if ((i * NJ + j) % 20 == 0) {
        fprintf(stderr, "\n");
      }
    }
  }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* 2-D convolution: each interior B[i][j] becomes a fixed-weight linear
 * combination of the 3x3 neighborhood of A around (i,j).  Border rows and
 * columns of B are never written here.  The outer loop is offloaded with
 * OpenMP `target teams distribute`; j is private to each iteration. */
static
void kernel_conv2d(int ni,
		   int nj,
		   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
#pragma omp target teams distribute private(j)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
	      + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
	      + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
  // printf("Kernal computation complete !!\n");
}
/* Benchmark driver: allocate, initialize, time the kernel, print the
 * result (to defeat dead-code elimination), and free. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));
  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();
  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
GB_binop__islt_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint32)
// A*D function (colscale): GB (_AxD__islt_uint32)
// D*A function (rowscale): GB (_DxB__islt_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint32)
// C=scalar+B GB (_bind1st__islt_uint32)
// C=scalar+B' GB (_bind1st_tran__islt_uint32)
// C=A+scalar GB (_bind2nd__islt_uint32)
// C=A'+scalar GB (_bind2nd_tran__islt_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_UINT32 || GxB_NO_ISLT_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; the "add" here applies the
// ISLT binop (cij = (aij < bij)) entrywise, see GB_BINOP above.
GrB_Info GB (_Cdense_ewise3_noaccum__islt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense C using the ISLT
// binop; B_ek_slicing partitions B's entries across tasks/threads.
GrB_Info GB (_Cdense_accumB__islt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C using the ISLT binop.
GrB_Info GB (_Cdense_accumb__islt_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returned.  Kept
    // as emitted by the code generator (this file must not be hand-edited).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale A by the diagonal matrix D with the ISLT binop.
GrB_Info GB (_AxD__islt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale B by the diagonal matrix D with the ISLT binop.
GrB_Info GB (_DxB__islt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (union of patterns) with optional mask M, structural
// and/or complemented; '+' is the ISLT binop.  The task schedule arrives
// precomputed in TaskList.
GrB_Info GB (_AaddB__islt_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here is released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B (intersection of patterns) with optional
// mask, where C is sparse/hypersparse; entries combined with ISLT.
GrB_Info GB (_AemultB_08__islt_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for ISLT here, so only the unflipped
// branch below is compiled in.
GrB_Info GB (_AemultB_02__islt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; entries combined with ISLT.
GrB_Info GB (_AemultB_04__islt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked / complemented) where the result
// C is held in bitmap form; entries combined with ISLT.
GrB_Info GB (_AemultB_bitmap__islt_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = (x < Bx [p]) for every entry present in B, with the
// scalar x bound as the first operand.  Cx and Bx may alias.
GrB_Info GB (_bind1st__islt_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cz = (uint32_t *) Cx_output ;
    const uint32_t xval = (*((const uint32_t *) x_input)) ;
    const uint32_t *Bz = (const uint32_t *) Bx_input ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < bnz ; k++)
    {
        // entries absent from the bitmap Bb are left untouched
        if (GBB (Bb, k))
        {
            uint32_t bval = GBX (Bz, k, false) ;
            Cz [k] = (xval < bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = (Ax [p] < y) for every entry present in A, with the
// scalar y bound as the second operand.  Cx and Ax may alias.
GrB_Info GB (_bind2nd__islt_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cz = (uint32_t *) Cx_output ;
    const uint32_t *Az = (const uint32_t *) Ax_input ;
    const uint32_t yval = (*((const uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // entries absent from the bitmap Ab are left untouched
        if (GBB (Ab, k))
        {
            uint32_t aval = GBX (Az, k, false) ;
            Cz [k] = (aval < yval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// Transpose driver: the transpose loop itself is in GB_unop_transpose.c and
// invokes the GB_CAST_OP macro defined just above with the bound scalar x.
GrB_Info GB (_bind1st_tran__islt_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any following code (same type here, since the
// generator emits the restore unconditionally)
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// Transpose driver: the transpose loop is in GB_unop_transpose.c and invokes
// the GB_CAST_OP macro above with the bound scalar y as the 2nd operand.
GrB_Info GB (_bind2nd_tran__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sparse.c | /*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*********************************************************************************
NAME: sparse
PURPOSE: This program tests the efficiency with which a sparse matrix
vector multiplication is carried out
USAGE: The program takes as input the number of threads, the 2log of the linear
size of the 2D grid (equalling the 2log of the square root of the order
of the sparse matrix), the radius of the difference stencil, and the number
of times the matrix-vector multiplication is carried out.
<progname> <# threads> <# iterations> <2log root-of-matrix-order> <radius>
The output consists of diagnostics to make sure the
algorithm worked, and of timing statistics.
FUNCTIONS CALLED:
Other than OpenMP or standard C functions, the following
functions are used in this program:
wtime()
bail_out()
reverse()
NOTES:
HISTORY: Written by Rob Van der Wijngaart, August 2006.
Updated by RvdW to parallelize matrix generation, March 2007.
Updated by RvdW to fix verification bug, February 2013
Updated by RvdW to sort matrix elements to reflect traditional CSR storage,
August 2013
***********************************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>
/* linearize the grid index */
/* NOTE(review): relies on a variable named lsize being in scope at the call
site (it is a local of main below); `i` is not parenthesized, which is safe
for the current callers but a latent macro hazard */
#define LIN(i,j) (i+((j)<<lsize))
/* if the scramble flag is set, convert all (linearized) grid indices by
reversing their bits; if not, leave the grid indices alone */
#if SCRAMBLE
#define REVERSE(a,b) reverse((a),(b))
#else
#define REVERSE(a,b) (a)
#endif
/* bits per byte, used to size the bit-reversal shift in reverse() */
#define BITS_IN_BYTE 8
/* forward declarations of the file-local helpers defined after main */
static u64Int reverse(register u64Int, int);
static int compare(const void *el1, const void *el2);
int main(int argc, char **argv){
int iter, r; /* dummies */
int lsize; /* logarithmic linear size of grid */
int lsize2; /* logarithmic size of grid */
int size; /* linear size of grid */
s64Int size2; /* matrix order (=total # points in grid) */
int radius, /* stencil parameters */
stencil_size;
s64Int row, col, first, last; /* dummies */
s64Int i, j; /* dummies */
int iterations; /* number of times the multiplication is done */
s64Int elm; /* sequence number of matrix nonzero */
s64Int nent; /* number of nonzero entries */
double sparsity; /* fraction of non-zeroes in matrix */
double sparse_time,/* timing parameters */
avgtime;
double * RESTRICT matrix; /* sparse matrix entries */
double * RESTRICT vector; /* vector multiplying the sparse matrix */
double * RESTRICT result; /* computed matrix-vector product */
double temp; /* temporary scalar storing reduction data */
double vector_sum; /* checksum of result */
double reference_sum; /* checksum of "rhs" */
double epsilon = 1.e-8; /* error tolerance */
s64Int * RESTRICT colIndex; /* column indices of sparse matrix entries */
int nthread_input, /* thread parameters */
nthread;
int num_error=0; /* flag that signals that requested and
obtained numbers of threads are the same */
size_t vector_space, /* variables used to hold prk_malloc sizes */
matrix_space,
index_space;
printf("Parallel Research Kernels version %s\n", PRKVERSION);
printf("OpenMP Sparse matrix-vector multiplication\n");
if (argc != 5) {
printf("Usage: %s <# threads> <# iterations> <2log grid size> <stencil radius>\n",*argv);
exit(EXIT_FAILURE);
}
/* Take number of threads to request from command line */
nthread_input = atoi(*++argv);
if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
printf("ERROR: Invalid number of threads: %d\n", nthread_input);
exit(EXIT_FAILURE);
}
omp_set_num_threads(nthread_input);
iterations = atoi(*++argv);
if (iterations < 1){
printf("ERROR: Iterations must be positive : %d \n", iterations);
exit(EXIT_FAILURE);
}
lsize = atoi(*++argv);
lsize2 = 2*lsize;
size = 1<<lsize;
if (lsize <0) {
printf("ERROR: Log of grid size must be greater than or equal to zero: %d\n",
(int) lsize);
exit(EXIT_FAILURE);
}
/* compute number of points in the grid */
size2 = size*size;
radius = atoi(*++argv);
if (radius <0) {
printf("ERROR: Stencil radius must be non-negative: %d\n", (int) size);
exit(EXIT_FAILURE);
}
/* emit error if (periodic) stencil overlaps with itself */
if (size <2*radius+1) {
printf("ERROR: Grid extent %d smaller than stencil diameter 2*%d+1= %d\n",
size, radius, radius*2+1);
exit(EXIT_FAILURE);
}
/* compute total size of star stencil in 2D */
stencil_size = 4*radius+1;
/* sparsity follows from number of non-zeroes per row */
sparsity = (double)(4*radius+1)/(double)size2;
/* compute total number of non-zeroes */
nent = size2*stencil_size;
matrix_space = nent*sizeof(double);
if (matrix_space/sizeof(double) != nent) {
printf("ERROR: Cannot represent space for matrix: %zu\n", matrix_space);
exit(EXIT_FAILURE);
}
matrix = (double *) prk_malloc(matrix_space);
if (!matrix) {
printf("ERROR: Could not allocate space for sparse matrix: "FSTR64U"\n", nent);
exit(EXIT_FAILURE);
}
vector_space = 2*size2*sizeof(double);
if (vector_space/sizeof(double) != 2*size2) {
printf("ERROR: Cannot represent space for vectors: %zu\n", vector_space);
exit(EXIT_FAILURE);
}
vector = (double *) prk_malloc(vector_space);
if (!vector) {
printf("ERROR: Could not allocate space for vectors: %d\n", (int)(2*size2));
exit(EXIT_FAILURE);
}
result = vector + size2;
index_space = nent*sizeof(s64Int);
if (index_space/sizeof(s64Int) != nent) {
printf("ERROR: Cannot represent space for column indices: %zu\n", index_space);
exit(EXIT_FAILURE);
}
colIndex = (s64Int *) prk_malloc(index_space);
if (!colIndex) {
printf("ERROR: Could not allocate space for column indices: "FSTR64U"\n",
nent*sizeof(s64Int));
exit(EXIT_FAILURE);
}
#pragma omp parallel private (row, col, elm, first, last, iter)
{
#pragma omp master
{
nthread = omp_get_num_threads();
if (nthread != nthread_input) {
num_error = 1;
printf("ERROR: number of requested threads %d does not equal ",
nthread_input);
printf("number of spawned threads %d\n", nthread);
}
else {
printf("Number of threads = %16d\n",nthread_input);
printf("Matrix order = "FSTR64U"\n", size2);
printf("Stencil diameter = %16d\n", 2*radius+1);
printf("Sparsity = %16.10lf\n", sparsity);
printf("Number of iterations = %16d\n", iterations);
#if SCRAMBLE
printf("Using scrambled indexing\n");
#else
printf("Using canonical indexing\n");
#endif
}
}
bail_out(num_error);
/* initialize the input and result vectors */
#pragma omp for
for (row=0; row<size2; row++) result[row] = vector[row] = 0.0;
/* fill matrix with nonzeroes corresponding to difference stencil. We use the
scrambling for reordering the points in the grid. */
#pragma omp for private (i,j,r)
for (row=0; row<size2; row++) {
j = row/size; i=row%size;
elm = row*stencil_size;
colIndex[elm] = REVERSE(LIN(i,j),lsize2);
for (r=1; r<=radius; r++, elm+=4) {
colIndex[elm+1] = REVERSE(LIN((i+r)%size,j),lsize2);
colIndex[elm+2] = REVERSE(LIN((i-r+size)%size,j),lsize2);
colIndex[elm+3] = REVERSE(LIN(i,(j+r)%size),lsize2);
colIndex[elm+4] = REVERSE(LIN(i,(j-r+size)%size),lsize2);
}
/* sort colIndex to make sure the compressed row accesses
vector elements in increasing order */
qsort(&(colIndex[row*stencil_size]), stencil_size, sizeof(s64Int), compare);
for (elm=row*stencil_size; elm<(row+1)*stencil_size; elm++)
matrix[elm] = 1.0/(double)(colIndex[elm]+1);
}
for (iter=0; iter<=iterations; iter++) {
/* start timer after a warmup iteration */
if (iter == 1) {
#pragma omp barrier
#pragma omp master
{
sparse_time = wtime();
}
}
/* fill vector */
#pragma omp for
for (row=0; row<size2; row++) vector[row] += (double) (row+1);
/* do the actual matrix-vector multiplication */
#pragma omp for
for (row=0; row<size2; row++) {
first = stencil_size*row; last = first+stencil_size-1;
temp=0.0;
/* #pragma omp simd reduction(+:temp) */
for (col=first; col<=last; col++) {
temp += matrix[col]*vector[colIndex[col]];
}
result[row] += temp;
}
} /* end of iterations */
#pragma omp barrier
#pragma omp master
{
sparse_time = wtime() - sparse_time;
}
} /* end of parallel region */
/* verification test */
reference_sum = 0.5 * (double) nent * (double) (iterations+1) *
(double) (iterations +2);
vector_sum = 0.0;
for (row=0; row<size2; row++) vector_sum += result[row];
if (ABS(vector_sum-reference_sum) > epsilon) {
printf("ERROR: Vector sum = %lf, Reference vector sum = %lf\n",
vector_sum, reference_sum);
exit(EXIT_FAILURE);
}
else {
printf("Solution validates\n");
#if VERBOSE
printf("Reference sum = %lf, vector sum = %lf\n",
reference_sum, vector_sum);
#endif
}
avgtime = sparse_time/iterations;
printf("Rate (MFlops/s): %lf Avg time (s): %lf\n",
1.0E-06 * (2.0*nent)/avgtime, avgtime);
exit(EXIT_SUCCESS);
}
/* Bit-reverses an unsigned integer stored in a 64-bit word.
   The reversal is taken with respect to the largest integer processed in
   this run (2^shift_in_bits values), so that the mapping is a true
   permutation of 0 .. 2^shift_in_bits - 1: after the full 64-bit reversal
   the value is shifted back down by (64 - shift_in_bits) bits.
   Example: with shift_in_bits = 8 (range 0-255), input 6 = 00000110b
   reverses to 96 = 01100000b, which stays inside 0-255. */
u64Int reverse(register u64Int x, int shift_in_bits){
  /* classic mask-then-shift bit reversal: swap adjacent bits, then 2-bit
     pairs, nibbles, bytes, 16-bit halves, and finally 32-bit halves */
  x = ((x & 0x5555555555555555) << 1)  | ((x & 0xaaaaaaaaaaaaaaaa) >> 1);
  x = ((x & 0x3333333333333333) << 2)  | ((x & 0xcccccccccccccccc) >> 2);
  x = ((x & 0x0f0f0f0f0f0f0f0f) << 4)  | ((x & 0xf0f0f0f0f0f0f0f0) >> 4);
  x = ((x & 0x00ff00ff00ff00ff) << 8)  | ((x & 0xff00ff00ff00ff00) >> 8);
  x = ((x & 0x0000ffff0000ffff) << 16) | ((x & 0xffff0000ffff0000) >> 16);
  x = ((x & 0x00000000ffffffff) << 32) | ((x & 0xffffffff00000000) >> 32);
  /* drop the bits below the active range */
  return (x>>((sizeof(u64Int)*BITS_IN_BYTE-shift_in_bits)));
}
/* qsort comparator: orders s64Int column indices ascending */
int compare(const void *el1, const void *el2) {
  const s64Int a = *(const s64Int *)el1;
  const s64Int b = *(const s64Int *)el2;
  if (a < b) return -1;
  if (a > b) return  1;
  return 0;
}
|
GB_unop__log1p_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log1p_fc32_fc32)
// op(A') function: GB (_unop_tran__log1p_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_clog1pf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_clog1pf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_clog1pf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG1P || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_clog1pf (Ax [p]) for every present entry; no typecast is
// needed since C and A share the GxB_FC32_t type.
GrB_Info GB (_unop_apply__log1p_fc32_fc32)
(
GxB_FC32_t *Cx,             // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab,  // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // all anz entries are present: apply the op unconditionally
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = GB_clog1pf (Ax [p]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = GB_clog1pf (Ax [p]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated wrapper: the transpose loop is in GB_unop_transpose.c and
// applies GB_CAST_OP (defined in this file's preamble) per entry.
GrB_Info GB (_unop_tran__log1p_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out; caller falls back to generic
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__lor_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__lor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__lor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_uint32)
// A*D function (colscale): GB (_AxD__lor_uint32)
// D*A function (rowscale): GB (_DxB__lor_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_uint32)
// C=scalar+B GB (_bind1st__lor_uint32)
// C=scalar+B' GB (_bind1st_tran__lor_uint32)
// C=A+scalar GB (_bind2nd__lor_uint32)
// C=A'+scalar GB (_bind2nd_tran__lor_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_UINT32 || GxB_NO_LOR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled for LOR (#if 0): this dense ewise3-accum kernel is only generated
// for the arithmetic monoid-like ops listed below, not for boolean-style LOR.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated wrapper: the dense C = A+B loop (using GB_BINOP = LOR on
// uint32_t) lives in the included template.
void GB (_Cdense_ewise3_noaccum__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated wrapper for GB_dense_subassign_23_template.c, with B sliced
// by B_ek_slicing into B_ntasks tasks over B_nthreads threads.
GrB_Info GB (_Cdense_accumB__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated wrapper for GB_dense_subassign_22_template.c: accumulates
// the scalar b (passed type-erased via p_bwork) into every entry of dense C.
// Fix: the original body contained two `return (GrB_SUCCESS)` statements,
// the second unreachable; a single return now follows the inner block,
// matching the sibling _Cdense_accumB wrapper.
GrB_Info GB (_Cdense_accumb__lor_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Auto-generated wrapper: scales each column j of A by D(j,j) using the LOR
// operator; the loop is in GB_AxB_colscale_template.c and writes into Cx.
GrB_Info GB (_AxD__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Auto-generated wrapper: scales each row i of B by D(i,i) using the LOR
// operator; the loop is in GB_AxB_rowscale_template.c and writes into Cx.
GrB_Info GB (_DxB__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Auto-generated wrapper for GB_add_template.c. When is_eWiseUnion is true,
// alpha/beta scalars (type-erased inputs) replace the implicit values of
// entries present in only one of A or B.
GrB_Info GB (_AaddB__lor_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces declared here are released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
// unpack the type-erased scalars; only read by the union variant
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Auto-generated wrapper: the sparse/hyper eWiseMult loop lives in
// GB_emult_08_meta.c, driven by the precomputed TaskList over C_ntasks tasks.
GrB_Info GB (_AemultB_08__lor_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Auto-generated wrapper for GB_emult_02_template.c. GB_BINOP_FLIP (defined
// in this file's preamble) selects at compile time whether a flipped variant
// is needed; LOR is commutative, so the non-flipped branch is compiled here.
GrB_Info GB (_AemultB_02__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Auto-generated wrapper: the loop is in GB_emult_04_template.c, driven by
// the sparse/hyper mask M sliced into M_ntasks tasks over M_nthreads threads.
GrB_Info GB (_AemultB_04__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Auto-generated wrapper: the bitmap eWiseMult loop lives in
// GB_bitmap_emult_template.c, specialized for LOR on uint32_t.
GrB_Info GB (_AemultB_bitmap__lor_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type combination was compiled out; caller falls back to generic
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Binds the scalar x as the FIRST operand: Cx [p] = (x != 0) || (Bx [p] != 0).
// Cx and Bx may be aliased, so no restrict qualifiers are used here.
GrB_Info GB (_bind1st__lor_uint32)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *out  = (uint32_t *) Cx_output ;
    uint32_t *Bx   = (uint32_t *) Bx_input ;
    const uint32_t xval = (*((uint32_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // entries absent from the bitmap are skipped
        if (!GBB (Bb, k)) continue ;
        const uint32_t bval = GBX (Bx, k, false) ;
        out [k] = ((xval != 0) || (bval != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Binds the scalar y as the SECOND operand: Cx [p] = (Ax [p] != 0) || (y != 0).
// Cx and Ax may be aliased, so no restrict qualifiers are used here.
GrB_Info GB (_bind2nd__lor_uint32)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *out  = (uint32_t *) Cx_output ;
    uint32_t *Ax   = (uint32_t *) Ax_input ;
    const uint32_t yval = (*((uint32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // entries absent from the bitmap are skipped
        if (!GBB (Ab, k)) continue ;
        const uint32_t aval = GBX (Ax, k, false) ;
        out [k] = ((aval != 0) || (yval != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// Transpose driver: the transpose loop is in GB_unop_transpose.c and invokes
// the GB_CAST_OP macro above with the bound scalar x as the 1st operand.
GrB_Info GB (_bind1st_tran__lor_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any following code (same type here, since the
// generator emits the restore unconditionally)
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// Transpose driver: the transpose loop is in GB_unop_transpose.c and invokes
// the GB_CAST_OP macro above with the bound scalar y as the 2nd operand.
GrB_Info GB (_bind2nd_tran__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
NETNTLM_bs_fmt_plug.c | /*
* NETNTLM_fmt.c -- NTLM Challenge/Response
*
* Written by JoMo-Kun <jmk at foofus.net> in 2007
* and placed in the public domain.
*
* Modified for performance, support for Extended Session Security, OMP
* and UTF-8, by magnum 2010-2011.
* Modified for using Bitsliced DES by Deepika Dutta Mishra
* <dipikadutta at gmail.com> in 2013, no rights reserved.
*
* This algorithm is designed for performing brute-force cracking of the NTLM
* (version 1) challenge/response pairs exchanged during network-based
* authentication attempts [1]. The captured challenge/response pairs from these
* attempts should be stored using the L0phtCrack 2.0 LC format, specifically:
* username:unused:unused:lm response:ntlm response:challenge. For example:
*
* CORP\Administrator:::25B2B477CE101D83648BB087CE7A1C217F51C7FC64C0EBB1:
* C8BD0C1630A9ECF7A95F494A8F0B2CB4A3F25B1225514304:1122334455667788
*
* It should be noted that a NTLM authentication response is not same as a NTLM
* password hash, which can be extracted using tools such as FgDump [2]. NTLM
* responses can be gathered via normal network capture or via tools which
* perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can
* also be harvested using a modified Samba service [5] in conjunction with
* some trickery to convince the user to connect to it. I leave what that
* trickery may actually be as an exercise for the reader (HINT: Karma, NMB
* broadcasts, IE, Outlook, social engineering, ...).
*
* [1] http://davenport.sourceforge.net/ntlm.html#theNtlmResponse
* [2] http://www.foofus.net/~fizzgig/fgdump/
* [3] http://ettercap.sourceforge.net/
* [4] http://www.oxid.it/cain.html
* [5] http://www.foofus.net/jmk/smbchallenge.html
*
* This version supports Extended Session Security. This is what
* is used when the "LM" hash ends in 32 zeros:
*
* DOMAIN\User:::c70e4fb229437ef300000000000000000000000000000000:
* abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9:24ca92fdab441aa4
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_NETNTLM_old;
#elif FMT_REGISTERS_H
john_register_one(&fmt_NETNTLM_old);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "DES_std.h"
#include "DES_bs.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "md5.h"
#include "unicode.h"
#include "memdbg.h"
#ifndef uchar
#define uchar unsigned char
#endif
#define FORMAT_LABEL "netntlm-naive"
#define FORMAT_NAME "NTLMv1 C/R"
#define ALGORITHM_NAME "MD4 DES (ESS MD5) " DES_BS_ALGORITHM_NAME " naive"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 24
#define BINARY_ALIGN 4
#define PARTIAL_BINARY_SIZE 8
#define SALT_SIZE 8
#define SALT_ALIGN 4
#define CIPHERTEXT_LENGTH 48
#define TOTAL_LENGTH (10 + 2 * 2 * SALT_SIZE + CIPHERTEXT_LENGTH)
#define MIN_KEYS_PER_CRYPT DES_BS_DEPTH
#define MAX_KEYS_PER_CRYPT DES_BS_DEPTH
/*
 * Self-test/benchmark vectors. Entries either give a canonical
 * "$NETNTLM$<challenge>$<response>" ciphertext directly, or supply the
 * raw login fields {user, -, -, lm-field, ntlm-response, challenge} for
 * prepare() to canonicalize. The "ESS" entry exercises Extended Session
 * Security (zero-padded client challenge in the LM field).
 */
static struct fmt_tests tests[] = {
	{"", "FooBarGerg", {"User", "", "", "lm-hash", "35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "1122334455667788"} },
	{"$NETNTLM$1122334455667788$BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "g3rg3g3rg3g3rg3"},
	{"$NETNTLM$1122334455667788$E463FAA5D868ECE20CAE622474A2F440A652D642156AF863", "M1xedC4se%^&*@)##(blahblah!@#"},
	{"$NETNTLM$c75c20bff9baa71f4765f360625700b0$81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "password"},
	{"$NETNTLM$1122334455667788$35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "FooBarGerg"},
	{"$NETNTLM$1122334455667788$A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "visit www.foofus.net"},
	{"$NETNTLM$24ca92fdab441aa4c70e4fb229437ef3$abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9", "longpassword"},
	{"$NETNTLM$1122334455667788$B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "cory21"},
	{"", "g3rg3g3rg3g3rg3", {"User", "", "", "lm-hash", "BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "1122334455667788"} },
	{"", "M1xedC4se%^&*@)##(blahblah!@#", {"User", "", "", "lm-hash", "E463FAA5D868ECE20CAE622474A2F440A652D642156AF863", "1122334455667788"} },
	{"", "visit www.foofus.net", {"User", "", "", "lm-hash", "A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "1122334455667788"} },
	{"", "password", {"ESS", "", "", "4765f360625700b000000000000000000000000000000000", "81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "c75c20bff9baa71f"} },
	{"", "cory21", {"User", "", "", "lm-hash", "B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "1122334455667788"} },
	{NULL}
};
/* Per-candidate state, allocated in init() for max_keys_per_crypt slots. */
static char (*saved_plain)[PLAINTEXT_LENGTH + 1]; /* plaintext candidates */
static int (*saved_len);                          /* length of each candidate */
static uchar (*output)[PARTIAL_BINARY_SIZE];      /* allocated in init(); not otherwise referenced in this file */
static uchar (*saved_key)[21]; // NT hash
static uchar *challenge;       /* current salt: IP-permuted 8-byte DES plaintext */
static int keys_prepared;      /* nonzero once NTLM hashes/key schedules are built */
/* forward declaration; definition appears later in this file */
static void set_salt(void *salt);
/*
 * One-time format initialization: set up the bitsliced DES engine and
 * allocate the per-candidate buffers sized by max_keys_per_crypt.
 */
static void init(struct fmt_main *self)
{
	int nkeys;

	/* LM =2 selects plain DES encryption: no salt, no iterations */
	DES_bs_init(2, DES_bs_cpt);
#if DES_bs_mt
	self->params.min_keys_per_crypt = DES_bs_min_kpc;
	self->params.max_keys_per_crypt = DES_bs_max_kpc;
#endif
	nkeys = self->params.max_keys_per_crypt;
	saved_plain = mem_calloc_tiny(nkeys * sizeof(*saved_plain), MEM_ALIGN_NONE);
	saved_len = mem_calloc_tiny(nkeys * sizeof(*saved_len), MEM_ALIGN_WORD);
	output = mem_calloc_tiny(nkeys * sizeof(*output), MEM_ALIGN_WORD);
	saved_key = mem_calloc_tiny(nkeys * sizeof(*saved_key), MEM_ALIGN_NONE);
}
/*
 * Validate a canonical ciphertext. Accepted layouts:
 *   $NETNTLM$ + 16 hex (server challenge)        + $ + 48 hex  (len 74)
 *   $NETNTLM$ + 32 hex (server+client challenge) + $ + 48 hex  (len 90)
 * Returns 1 when the string matches either layout, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	size_t len;
	char *p;

	if (strncmp(ciphertext, "$NETNTLM$", 9))
		return 0;
	len = strlen(ciphertext);
	if (len != 74 && len != 90)
		return 0;
	/* the separating '$' must sit at one of the two legal offsets */
	if (ciphertext[25] != '$' && ciphertext[41] != '$')
		return 0;
	/* challenge field: hex digits terminated by '$' */
	p = &ciphertext[9];
	while (atoi16[ARCH_INDEX(*p)] != 0x7F)
		p++;
	if (*p != '$')
		return 0;
	/* response field: hex digits running to the end of the string */
	for (p++; atoi16[ARCH_INDEX(*p)] != 0x7F; p++)
		;
	if (*p)
		return 0;
	return (p - ciphertext - 26 == CIPHERTEXT_LENGTH) ||
	       (p - ciphertext - 42 == CIPHERTEXT_LENGTH);
}
/*
 * Build the canonical "$NETNTLM$<challenge>$<response>" string from the
 * raw login fields: split_fields[3] is the "LM" field, [4] the NTLM
 * response, [5] the server challenge. For Extended Session Security the
 * 8-byte client challenge (first 16 hex digits of a zero-padded LM
 * field) is appended to the server challenge. Returns split_fields[1]
 * unchanged when the fields cannot be canonicalized.
 */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
	char *cp;
	char clientChal[17]; /* 16 hex digits + NUL */

	/* already canonical */
	if (!strncmp(split_fields[1], "$NETNTLM$", 9))
		return split_fields[1];
	if (!split_fields[3]||!split_fields[4]||!split_fields[5])
		return split_fields[1];
	if (strlen(split_fields[4]) != CIPHERTEXT_LENGTH)
		return split_fields[1];
	// this string suggests we have an improperly formatted NTLMv2
	if (!strncmp(&split_fields[4][32], "0101000000000000", 16))
		return split_fields[1];
	// Handle ESS (8 byte client challenge in "LM" field padded with zeros)
	if (strlen(split_fields[3]) == 48 && !strncmp(&split_fields[3][16],
	    "00000000000000000000000000000000", 32)) {
		memcpy(clientChal, split_fields[3],16);
		clientChal[16] = 0;
	}
	else
		clientChal[0] = 0;
	/* tag + server challenge [+ client challenge] + '$' + response + NUL */
	cp = mem_alloc(9+strlen(split_fields[5])+strlen(clientChal)+1+strlen(split_fields[4])+1);
	sprintf(cp, "$NETNTLM$%s%s$%s", split_fields[5], clientChal, split_fields[4]);

	if (valid(cp,self)) {
		char *cp2 = str_alloc_copy(cp);
		MEM_FREE(cp);
		return cp2;
	}
	MEM_FREE(cp);
	return split_fields[1];
}
/*
 * Canonicalize a valid ciphertext: copy into a static buffer and
 * lower-case the hex digits. Lower-casing starts at offset 8 (the
 * tag's final '$', which strlwr leaves untouched).
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TOTAL_LENGTH + 1];

	memset(out, 0, sizeof(out));
	strcpy(out, ciphertext);
	strlwr(&out[8]);
	return out;
}
/*
 * Repack a 24-byte DES ciphertext (three 8-byte DES output blocks)
 * into the word layout used by the bitsliced comparison routines,
 * applying the DES initial permutation that DES_bs results carry.
 * Returns a pointer to a static 6-word buffer (not reentrant).
 */
static ARCH_WORD_32 *generate_des_format(uchar* binary)
{
	static ARCH_WORD_32 out[6];
	ARCH_WORD block[6];
	int chr, src,dst,i;
	uchar value, mask;
	ARCH_WORD *ptr;

	memset(block, 0, sizeof(block));
	/* Scatter each 8-byte block, MSB first, into two 32-bit words:
	   bit number dst of the block lands in word (chr/4)+(dst>>5),
	   bit position (dst & 0x1F). */
	for (chr = 0; chr < 24; chr=chr + 8)
	{
		dst = 0;
		for(i=0; i<8; i++)
		{
			value = binary[chr + i];
			mask = 0x80;
			for (src = 0; src < 8; src++) {
				if (value & mask)
					block[(chr/4) + (dst>>5)]|= 1 << (dst & 0x1F);
				mask >>= 1;
				dst++;
			}
		}
	}
	/* Apply initial permutation on ciphertext blocks */
	for(i=0; i<6; i=i+2)
	{
		ptr = DES_do_IP(&block[i]);
		/* store the two permuted halves swapped */
		out[i] = ptr[1];
		out[i+1] = ptr[0];
	}
	return out;
}
static void *get_binary(char *ciphertext)
{
uchar binary[BINARY_SIZE];
int i;
ARCH_WORD_32 *ptr;
ciphertext = strrchr(ciphertext, '$') + 1;
for (i=0; i<BINARY_SIZE; i++) {
binary[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4;
binary[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]);
}
/* Set binary in DES format */
ptr = generate_des_format(binary);
return ptr;
}
/*
 * Expand a 56-bit (7-byte) key to the 8-byte form DES_bs_set_key()
 * expects: 7 key bits per byte, shifted right once (OpenSSL layout),
 * with bit 0x80 forced on in every byte so no byte reads as the zero
 * terminator that DES_bs_set_key() checks for.
 */
static inline void setup_des_key(unsigned char key_56[], int index)
{
	char key[8];
	int i;

	key[0] = (key_56[0] >> 1) | 0x80;
	/* bytes 1..6 each take the low bits of key_56[i-1] and the high
	   bits of key_56[i] (same unrolled shifts as the original) */
	for (i = 1; i < 7; i++)
		key[i] = ((((key_56[i - 1] << (8 - i)) | (key_56[i] >> i)) >> 1) | 0x80);
	key[7] = ((key_56[6] << 1) >> 1) | 0x80;

	DES_bs_set_key((char*)key, index);
}
/*
 * Compute DES(challenge) keyed by the first 7 bytes of each candidate's
 * NTLM hash (the first third of an NTLMv1 response). The second and
 * third DES blocks are only computed later, in cmp_exact(). The NTLM
 * hashes are built lazily on the first call after set_key().
 * Returns the number of candidates processed.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int i;

	if (!keys_prepared) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (i = 0; i < count; i++) {
			int len;

			/* Generate 16-byte NTLM hash */
			len = E_md4hash((uchar *) saved_plain[i], saved_len[i],
			                saved_key[i]);
			/* negative len reports where the key was truncated */
			if (len <= 0)
				saved_plain[i][-len] = 0; // match truncation
			/* NULL-padding the 16-byte hash to 21-bytes is made
			   in cmp_exact if needed */
			setup_des_key(saved_key[i], i);
		}
		keys_prepared = 1;
	}
	/* Bitsliced des encryption */
	DES_bs_crypt_plain(count);
	return count;
}
/* Fast check: compare the first 32 output bits of all candidates
   against the binary; nonzero when any candidate may match. */
static int cmp_all(void *binary, int count)
{
	return DES_bs_cmp_all((ARCH_WORD_32 *)binary, count);
}
/* Check the first 32 output bits of one candidate against the binary. */
static int cmp_one(void *binary, int index)
{
	return DES_bs_cmp_one((ARCH_WORD_32 *)binary, 32, index);
}
/*
 * Full verification of one candidate. An NTLMv1 response is three DES
 * encryptions of the challenge, keyed with bytes 0-6, 7-13 and 14-20 of
 * the NULL-padded NTLM hash. crypt_all() produced only the first block;
 * the other two are computed here in bitslice slot 0, which is restored
 * to key 0's schedule before returning on every path.
 * Returns 1 on an exact match, 0 otherwise.
 */
static int cmp_exact(char *source, int index)
{
	ARCH_WORD_32 *binary;

	/* NULL-pad 16-byte NTLM hash to 21-bytes (postponed until now) */
	memset(&saved_key[index][16], 0, 5);

	/* block 1: recheck all 64 bits of the result from crypt_all() */
	binary = get_binary(source);
	if (!DES_bs_cmp_one(binary, 64, index))
	{
		setup_des_key(saved_key[0], 0);
		return 0;
	}

	/* block 2: encrypt the challenge with hash bytes 7..13 in slot 0 */
	setup_des_key(&saved_key[index][7], 0);
	DES_bs_crypt_plain(1);
	binary = get_binary(source);
	if (!DES_bs_cmp_one(&binary[2], 64, 0))
	{
		setup_des_key(saved_key[0], 0);
		return 0;
	}

	/* block 3: encrypt with hash bytes 14..20 (mostly the zero pad) */
	setup_des_key(&saved_key[index][14], 0);
	DES_bs_crypt_plain(1);
	binary = get_binary(source);
	if (!DES_bs_cmp_one(&binary[4], 64, 0))
	{
		setup_des_key(saved_key[0], 0);
		return 0;
	}
	setup_des_key(saved_key[0], 0);
	return 1;
}
/*
 * Derive the 8-byte DES plaintext ("salt") from the ciphertext.
 * Plain NTLMv1: the 8-byte server challenge itself. Extended Session
 * Security (32-hex-digit challenge field): MD5(server challenge ||
 * client challenge) truncated to 8 bytes. The DES initial permutation
 * is pre-applied so set_salt() can feed the value straight to
 * DES_bs_generate_plaintext(). Returns a static buffer.
 */
static void *get_salt(char *ciphertext)
{
	static uchar *binary_salt;
	int i, cnt,j;
	unsigned char temp[SALT_SIZE];

	if (!binary_salt) binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);

	/* a '$' at offset 25 means a plain 16-hex-digit server challenge */
	if (ciphertext[25] == '$') {
		// Server challenge
		ciphertext += 9;
		for (i = 0; i < SALT_SIZE; ++i)
			binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])];
	} else {
		uchar es_salt[2*SALT_SIZE], k1[2*SALT_SIZE];
		MD5_CTX ctx;

		ciphertext += 9;
		// Extended Session Security,
		// Concatenate Server & Client challenges
		for (i = 0;i < 2 * SALT_SIZE; ++i)
			es_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])];

		// MD5 the concatenated challenges, result is our key
		MD5_Init(&ctx);
		MD5_Update(&ctx, es_salt, 16);
		MD5_Final((void*)k1, &ctx);
		memcpy(binary_salt, k1, SALT_SIZE); // but only 8 bytes of it
	}

	/* Apply IP to salt */
	memset(temp, 0, SALT_SIZE);
	for (i = 0; i < 64; i++) {
		/* DES_IP[i ^ 0x20] selects the source bit for output bit i;
		   the ^0x20 swaps 32-bit halves — relies on DES_std's table
		   convention (see DES_std.c) */
		cnt = DES_IP[i ^ 0x20];
		j = (uchar)((binary_salt[cnt >> 3] >> (7 - (cnt & 7))) & 1);
		temp[i/8] |= j << (7 - (i % 8));
	}
	memcpy(binary_salt, temp, SALT_SIZE);
	return (void*)binary_salt;
}
/* Install a salt: remember the IP-permuted challenge and expand it into
   the bitsliced plaintext used by DES_bs_crypt_plain(). */
static void set_salt(void *salt)
{
	challenge = salt;
	DES_bs_generate_plaintext(challenge);
}
/*
 * Store one plaintext candidate (including its NUL terminator) and
 * invalidate the cached key schedules so crypt_all() rebuilds them.
 */
static void netntlm_set_key(char *key, int index)
{
	const size_t len = strlen(key);

	saved_len[index] = len;
	memcpy(saved_plain[index], key, len + 1);
	keys_prepared = 0;
}
/* Return the stored plaintext candidate for the given slot. */
static char *get_key(int index)
{
	return saved_plain[index];
}
/* Hash the salt (first 32 bits of the permuted challenge) into the
   salt table. */
static int salt_hash(void *salt)
{
	return *(ARCH_WORD_32 *)salt & (SALT_HASH_SIZE - 1);
}
/* Format descriptor registered with the John core. */
struct fmt_main fmt_NETNTLM_old = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#if DES_BS
		FMT_BS |
#if DES_bs_mt
		FMT_OMP |
#endif
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
#if FMT_MAIN_VERSION > 11
		{ NULL }, /* tunable cost names */
#endif
		tests
	}, { /* methods */
		init,
		fmt_default_done,
		fmt_default_reset,
		prepare,
		valid,
		split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL }, /* tunable cost value functions */
#endif
		fmt_default_source,
		{ /* binary_hash[]: defaults; cmp_all does the real work */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		set_salt,
		netntlm_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{ /* get_hash[]: read hashes out of the bitsliced output */
			DES_bs_get_hash_0,
			DES_bs_get_hash_1,
			DES_bs_get_hash_2,
			DES_bs_get_hash_3,
			DES_bs_get_hash_4,
			DES_bs_get_hash_5,
			DES_bs_get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
graph.c | /*!
* \file
*
* \brief Various routines with dealing with sparse graphs
*
* \author George Karypis
* \version\verbatim $Id: graph.c 13328 2012-12-31 14:57:40Z karypis $ \endverbatim
*/
#include "gklib/GKlib.h"
#define OMPMINOPS 50000
/*************************************************************************/
/*! Allocate memory for a graph and initializes it
\returns the allocated graph. The various fields are set to NULL.
*/
/**************************************************************************/
/*
 * Allocate and zero-initialize a new graph object.
 * Returns the new graph; all array fields are NULL and nvtxs is -1.
 */
gk_graph_t *gk_graph_Create()
{
  gk_graph_t *graph =
      (gk_graph_t *)gk_malloc(sizeof(gk_graph_t), "gk_graph_Create: graph");

  gk_graph_Init(graph);
  return graph;
}
/*************************************************************************/
/*! Initializes the graph.
\param graph is the graph to be initialized.
*/
/*************************************************************************/
/*
 * Reset a graph to the empty state: every array field becomes NULL and
 * the vertex count is marked as unset (-1).
 */
void gk_graph_Init(gk_graph_t *graph)
{
  memset(graph, 0, sizeof(*graph));
  graph->nvtxs = -1;
}
/*************************************************************************/
/*! Frees all the memory allocated for a graph.
\param graph is the graph to be freed.
*/
/*************************************************************************/
/*
 * Free a graph and all of its contents; *graph is set to NULL by
 * gk_free(). A NULL *graph is a no-op.
 */
void gk_graph_Free(gk_graph_t **graph)
{
  if (*graph) {
    gk_graph_FreeContents(*graph);
    gk_free((void **)graph, LTERM);
  }
}
/*************************************************************************/
/*! Frees only the memory allocated for the graph's different fields and
sets them to NULL.
\param graph is the graph whose contents will be freed.
*/
/*************************************************************************/
/*
 * Free every array owned by the graph; gk_free() also NULLs each field.
 * The graph object itself is not freed.
 * Fix: the first argument was cast to (void *) instead of (void **);
 * gk_free() takes a LTERM-terminated list of pointers-to-pointers, and
 * every other call site in this file uses (void **).
 */
void gk_graph_FreeContents(gk_graph_t *graph)
{
  gk_free((void **)&graph->xadj, &graph->adjncy,
          &graph->iadjwgt, &graph->fadjwgt,
          &graph->ivwgts, &graph->fvwgts,
          &graph->ivsizes, &graph->fvsizes,
          &graph->vlabels,
          LTERM);
}
/**************************************************************************/
/*! Reads a sparse graph from the supplied file
\param filename is the file that stores the data.
\param format is the graph format. The supported values are:
GK_GRAPH_FMT_METIS.
\param isfewgts is 1 if the edge-weights should be read as floats
\param isfvwgts is 1 if the vertex-weights should be read as floats
\param isfvsizes is 1 if the vertex-sizes should be read as floats
\returns the graph that was read.
*/
/**************************************************************************/
gk_graph_t *gk_graph_Read(char *filename, int format, int isfewgts,
int isfvwgts, int isfvsizes)
{
ssize_t i, k, l;
size_t nfields, nvtxs, nedges, fmt, ncon, lnlen;
int32_t ival;
float fval;
int readsizes=0, readwgts=0, readvals=0, numbering=0;
char *line=NULL, *head, *tail, fmtstr[256];
FILE *fpin=NULL;
gk_graph_t *graph=NULL;
if (!gk_fexists(filename))
gk_errexit(SIGERR, "File %s does not exist!\n", filename);
if (format == GK_GRAPH_FMT_METIS) {
fpin = gk_fopen(filename, "r", "gk_graph_Read: fpin");
do {
if (gk_getline(&line, &lnlen, fpin) <= 0)
gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
} while (line[0] == '%');
fmt = ncon = 0;
nfields = sscanf(line, "%zu %zu %zu %zu", &nvtxs, &nedges, &fmt, &ncon);
if (nfields < 2)
gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n");
nedges *= 2;
if (fmt > 111)
gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt);
sprintf(fmtstr, "%03zu", fmt%1000);
readsizes = (fmtstr[0] == '1');
readwgts = (fmtstr[1] == '1');
readvals = (fmtstr[2] == '1');
numbering = 1;
ncon = (ncon == 0 ? 1 : ncon);
}
else {
gk_errexit(SIGERR, "Unrecognized format: %d\n", format);
}
graph = gk_graph_Create();
graph->nvtxs = nvtxs;
graph->xadj = gk_zmalloc(nvtxs+1, "gk_graph_Read: xadj");
graph->adjncy = gk_i32malloc(nedges, "gk_graph_Read: adjncy");
if (readvals) {
if (isfewgts)
graph->fadjwgt = gk_fmalloc(nedges, "gk_graph_Read: fadjwgt");
else
graph->iadjwgt = gk_i32malloc(nedges, "gk_graph_Read: iadjwgt");
}
if (readsizes) {
if (isfvsizes)
graph->fvsizes = gk_fmalloc(nvtxs, "gk_graph_Read: fvsizes");
else
graph->ivsizes = gk_i32malloc(nvtxs, "gk_graph_Read: ivsizes");
}
if (readwgts) {
if (isfvwgts)
graph->fvwgts = gk_fmalloc(nvtxs*ncon, "gk_graph_Read: fvwgts");
else
graph->ivwgts = gk_i32malloc(nvtxs*ncon, "gk_graph_Read: ivwgts");
}
/*----------------------------------------------------------------------
* Read the sparse graph file
*---------------------------------------------------------------------*/
numbering = (numbering ? - 1 : 0);
for (graph->xadj[0]=0, k=0, i=0; i<nvtxs; i++) {
do {
if (gk_getline(&line, &lnlen, fpin) == -1)
gk_errexit(SIGERR, "Pregraphure end of input file: file while reading row %d\n", i);
} while (line[0] == '%');
head = line;
tail = NULL;
/* Read vertex sizes */
if (readsizes) {
if (isfvsizes) {
#ifdef __MSC__
graph->fvsizes[i] = (float)strtod(head, &tail);
#else
graph->fvsizes[i] = strtof(head, &tail);
#endif
if (tail == head)
gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
if (graph->fvsizes[i] < 0)
gk_errexit(SIGERR, "The size for vertex %zd must be >= 0\n", i+1);
}
else {
graph->ivsizes[i] = strtol(head, &tail, 0);
if (tail == head)
gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
if (graph->ivsizes[i] < 0)
gk_errexit(SIGERR, "The size for vertex %zd must be >= 0\n", i+1);
}
head = tail;
}
/* Read vertex weights */
if (readwgts) {
for (l=0; l<ncon; l++) {
if (isfvwgts) {
#ifdef __MSC__
graph->fvwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
graph->fvwgts[i*ncon+l] = strtof(head, &tail);
#endif
if (tail == head)
gk_errexit(SIGERR, "The line for vertex %zd does not have enough weights "
"for the %d constraints.\n", i+1, ncon);
if (graph->fvwgts[i*ncon+l] < 0)
gk_errexit(SIGERR, "The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
}
else {
graph->ivwgts[i*ncon+l] = strtol(head, &tail, 0);
if (tail == head)
gk_errexit(SIGERR, "The line for vertex %zd does not have enough weights "
"for the %d constraints.\n", i+1, ncon);
if (graph->ivwgts[i*ncon+l] < 0)
gk_errexit(SIGERR, "The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
}
head = tail;
}
}
/* Read the rest of the row */
while (1) {
ival = (int)strtol(head, &tail, 0);
if (tail == head)
break;
head = tail;
if ((graph->adjncy[k] = ival + numbering) < 0)
gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i);
if (readvals) {
if (isfewgts) {
#ifdef __MSC__
fval = (float)strtod(head, &tail);
#else
fval = strtof(head, &tail);
#endif
if (tail == head)
gk_errexit(SIGERR, "Value could not be found for edge! Vertex:%zd, NNZ:%zd\n", i, k);
graph->fadjwgt[k] = fval;
}
else {
ival = strtol(head, &tail, 0);
if (tail == head)
gk_errexit(SIGERR, "Value could not be found for edge! Vertex:%zd, NNZ:%zd\n", i, k);
graph->iadjwgt[k] = ival;
}
head = tail;
}
k++;
}
graph->xadj[i+1] = k;
}
if (k != nedges)
gk_errexit(SIGERR, "gk_graph_Read: Something wrong with the number of edges in "
"the input file. nedges=%zd, Actualnedges=%zd.\n", nedges, k);
gk_fclose(fpin);
gk_free((void **)&line, LTERM);
return graph;
}
/**************************************************************************/
/*! Writes a graph into a file.
\param graph is the graph to be written,
\param filename is the name of the output file.
\param format is one of GK_GRAPH_FMT_METIS specifying
the format of the output file.
*/
/**************************************************************************/
/*
 * Write a graph in METIS format to filename, or to stdout when
 * filename is NULL. Only GK_GRAPH_FMT_METIS is supported; vertex IDs
 * are written 1-based. Integer arrays take precedence over their float
 * counterparts for each of sizes/weights/edge-values.
 */
void gk_graph_Write(gk_graph_t *graph, char *filename, int format)
{
  ssize_t i, j;
  int hasvwgts, hasvsizes, hasewgts;
  FILE *fpout;

  if (format != GK_GRAPH_FMT_METIS)
    gk_errexit(SIGERR, "Unknown file format. %d\n", format);

  if (filename)
    fpout = gk_fopen(filename, "w", "gk_graph_Write: fpout");
  else
    fpout = stdout;

  hasewgts  = (graph->iadjwgt || graph->fadjwgt);
  hasvwgts  = (graph->ivwgts || graph->fvwgts);
  hasvsizes = (graph->ivsizes || graph->fvsizes);

  /* write the header line: #vertices, #edges (each stored twice), and
     the optional 3-digit fmt flags <vsizes><vwgts><ewgts> */
  fprintf(fpout, "%d %zd", graph->nvtxs, graph->xadj[graph->nvtxs]/2);
  if (hasvwgts || hasvsizes || hasewgts)
    fprintf(fpout, " %d%d%d", hasvsizes, hasvwgts, hasewgts);
  fprintf(fpout, "\n");

  for (i=0; i<graph->nvtxs; i++) {
    if (hasvsizes) {
      if (graph->ivsizes)
        fprintf(fpout, " %d", graph->ivsizes[i]);
      else
        fprintf(fpout, " %f", graph->fvsizes[i]);
    }

    if (hasvwgts) {
      if (graph->ivwgts)
        fprintf(fpout, " %d", graph->ivwgts[i]);
      else
        fprintf(fpout, " %f", graph->fvwgts[i]);
    }

    for (j=graph->xadj[i]; j<graph->xadj[i+1]; j++) {
      /* +1 converts back to the 1-based IDs of the file format */
      fprintf(fpout, " %d", graph->adjncy[j]+1);
      if (hasewgts) {
        if (graph->iadjwgt)
          fprintf(fpout, " %d", graph->iadjwgt[j]);
        else
          fprintf(fpout, " %f", graph->fadjwgt[j]);
      }
    }
    fprintf(fpout, "\n");
  }

  if (filename)
    gk_fclose(fpout);
}
/*************************************************************************/
/*! Returns a copy of a graph.
\param graph is the graph to be duplicated.
\returns the newly created copy of the graph.
*/
/**************************************************************************/
/*
 * Deep-copy a graph: every non-NULL array of the source is allocated
 * and copied into the new graph; NULL fields stay NULL.
 * Returns the newly allocated copy (caller frees with gk_graph_Free).
 */
gk_graph_t *gk_graph_Dup(gk_graph_t *graph)
{
  gk_graph_t *ngraph;

  ngraph = gk_graph_Create();

  ngraph->nvtxs  = graph->nvtxs;

  /* copy the adjacency structure */
  if (graph->xadj)
    ngraph->xadj = gk_zcopy(graph->nvtxs+1, graph->xadj,
                            gk_zmalloc(graph->nvtxs+1, "gk_graph_Dup: xadj"));

  /* per-vertex arrays of length nvtxs */
  if (graph->ivwgts)
    ngraph->ivwgts = gk_i32copy(graph->nvtxs, graph->ivwgts,
                            gk_i32malloc(graph->nvtxs, "gk_graph_Dup: ivwgts"));
  if (graph->ivsizes)
    ngraph->ivsizes = gk_i32copy(graph->nvtxs, graph->ivsizes,
                            gk_i32malloc(graph->nvtxs, "gk_graph_Dup: ivsizes"));
  if (graph->vlabels)
    ngraph->vlabels = gk_i32copy(graph->nvtxs, graph->vlabels,
                            gk_i32malloc(graph->nvtxs, "gk_graph_Dup: ivlabels"));
  if (graph->fvwgts)
    ngraph->fvwgts = gk_fcopy(graph->nvtxs, graph->fvwgts,
                            gk_fmalloc(graph->nvtxs, "gk_graph_Dup: fvwgts"));
  if (graph->fvsizes)
    ngraph->fvsizes = gk_fcopy(graph->nvtxs, graph->fvsizes,
                            gk_fmalloc(graph->nvtxs, "gk_graph_Dup: fvsizes"));

  /* per-edge arrays of length xadj[nvtxs] */
  if (graph->adjncy)
    ngraph->adjncy = gk_i32copy(graph->xadj[graph->nvtxs], graph->adjncy,
                            gk_i32malloc(graph->xadj[graph->nvtxs], "gk_graph_Dup: adjncy"));
  if (graph->iadjwgt)
    ngraph->iadjwgt = gk_i32copy(graph->xadj[graph->nvtxs], graph->iadjwgt,
                            gk_i32malloc(graph->xadj[graph->nvtxs], "gk_graph_Dup: iadjwgt"));
  if (graph->fadjwgt)
    ngraph->fadjwgt = gk_fcopy(graph->xadj[graph->nvtxs], graph->fadjwgt,
                            gk_fmalloc(graph->xadj[graph->nvtxs], "gk_graph_Dup: fadjwgt"));

  return ngraph;
}
/*************************************************************************/
/*! Returns a subgraph containing a set of consecutive vertices.
\param graph is the original graph.
\param vstart is the starting vertex.
\param nvtxs is the number of vertices from vstart to extract.
\returns the newly created subgraph.
*/
/**************************************************************************/
/*
 * Extract the induced structure over vertices [vstart, vstart+nvtxs).
 * Adjacency lists are copied verbatim, so neighbor IDs still refer to
 * the ORIGINAL numbering (they are not remapped or filtered).
 * Returns the new subgraph, or NULL when the range is out of bounds.
 */
gk_graph_t *gk_graph_ExtractSubgraph(gk_graph_t *graph, int vstart, int nvtxs)
{
  ssize_t i;
  gk_graph_t *ngraph;

  if (vstart+nvtxs > graph->nvtxs)
    return NULL;

  ngraph = gk_graph_Create();

  ngraph->nvtxs  = nvtxs;

  /* copy the adjancy structure */
  if (graph->xadj)
    ngraph->xadj = gk_zcopy(nvtxs+1, graph->xadj+vstart,
                            gk_zmalloc(nvtxs+1, "gk_graph_ExtractSubgraph: xadj"));
  /* rebase xadj so it starts at 0; must walk top-down because the
     entry xadj[0] being subtracted is itself rewritten last */
  for (i=nvtxs; i>=0; i--)
    ngraph->xadj[i] -= ngraph->xadj[0];
  ASSERT(ngraph->xadj[0] == 0);

  if (graph->ivwgts)
    ngraph->ivwgts = gk_i32copy(nvtxs, graph->ivwgts+vstart,
                            gk_i32malloc(nvtxs, "gk_graph_ExtractSubgraph: ivwgts"));
  if (graph->ivsizes)
    ngraph->ivsizes = gk_i32copy(nvtxs, graph->ivsizes+vstart,
                            gk_i32malloc(nvtxs, "gk_graph_ExtractSubgraph: ivsizes"));
  if (graph->vlabels)
    ngraph->vlabels = gk_i32copy(nvtxs, graph->vlabels+vstart,
                            gk_i32malloc(nvtxs, "gk_graph_ExtractSubgraph: vlabels"));
  if (graph->fvwgts)
    ngraph->fvwgts = gk_fcopy(nvtxs, graph->fvwgts+vstart,
                            gk_fmalloc(nvtxs, "gk_graph_ExtractSubgraph: fvwgts"));
  if (graph->fvsizes)
    ngraph->fvsizes = gk_fcopy(nvtxs, graph->fvsizes+vstart,
                            gk_fmalloc(nvtxs, "gk_graph_ExtractSubgraph: fvsizes"));

  ASSERT(ngraph->xadj[nvtxs] == graph->xadj[vstart+nvtxs]-graph->xadj[vstart]);

  /* edge arrays: copy the slice belonging to the extracted vertices */
  if (graph->adjncy)
    ngraph->adjncy = gk_i32copy(graph->xadj[vstart+nvtxs]-graph->xadj[vstart],
                            graph->adjncy+graph->xadj[vstart],
                            gk_i32malloc(graph->xadj[vstart+nvtxs]-graph->xadj[vstart],
                                      "gk_graph_ExtractSubgraph: adjncy"));
  if (graph->iadjwgt)
    ngraph->iadjwgt = gk_i32copy(graph->xadj[vstart+nvtxs]-graph->xadj[vstart],
                            graph->iadjwgt+graph->xadj[vstart],
                            gk_i32malloc(graph->xadj[vstart+nvtxs]-graph->xadj[vstart],
                                      "gk_graph_ExtractSubgraph: iadjwgt"));
  if (graph->fadjwgt)
    ngraph->fadjwgt = gk_fcopy(graph->xadj[vstart+nvtxs]-graph->xadj[vstart],
                            graph->fadjwgt+graph->xadj[vstart],
                            gk_fmalloc(graph->xadj[vstart+nvtxs]-graph->xadj[vstart],
                                      "gk_graph_ExtractSubgraph: fadjwgt"));

  return ngraph;
}
/*************************************************************************/
/*! Returns a graph that has been reordered according to the permutation.
\param[IN] graph is the graph to be re-ordered.
\param[IN] perm is the new ordering of the graph's vertices
\param[IN] iperm is the original ordering of the re-ordered graph's vertices
\returns the newly created copy of the graph.
\note Either perm or iperm can be NULL but not both.
*/
/**************************************************************************/
/*
 * Return a copy of the graph with its vertices renumbered: vertex i of
 * the input becomes vertex perm[i] of the output (iperm is the inverse
 * mapping). Either permutation may be NULL and is then derived from the
 * other; if both are NULL, NULL is returned. Adjacency lists and all
 * per-vertex/per-edge arrays are relocated accordingly.
 */
gk_graph_t *gk_graph_Reorder(gk_graph_t *graph, int32_t *perm, int32_t *iperm)
{
  ssize_t j, jj, *xadj;
  int i, k, u, v, nvtxs;
  int freeperm=0, freeiperm=0;
  int32_t *adjncy;
  gk_graph_t *ngraph;

  if (perm == NULL && iperm == NULL)
    return NULL;

  ngraph = gk_graph_Create();

  ngraph->nvtxs = nvtxs = graph->nvtxs;

  xadj   = graph->xadj;
  adjncy = graph->adjncy;

  /* allocate memory for the different structures that are present in graph */
  if (graph->xadj)
    ngraph->xadj = gk_zmalloc(nvtxs+1, "gk_graph_Reorder: xadj");
  if (graph->ivwgts)
    ngraph->ivwgts = gk_i32malloc(nvtxs, "gk_graph_Reorder: ivwgts");
  if (graph->ivsizes)
    ngraph->ivsizes = gk_i32malloc(nvtxs, "gk_graph_Reorder: ivsizes");
  if (graph->vlabels)
    ngraph->vlabels = gk_i32malloc(nvtxs, "gk_graph_Reorder: ivlabels");
  if (graph->fvwgts)
    ngraph->fvwgts = gk_fmalloc(nvtxs, "gk_graph_Reorder: fvwgts");
  if (graph->fvsizes)
    ngraph->fvsizes = gk_fmalloc(nvtxs, "gk_graph_Reorder: fvsizes");
  if (graph->adjncy)
    ngraph->adjncy = gk_i32malloc(graph->xadj[nvtxs], "gk_graph_Reorder: adjncy");
  if (graph->iadjwgt)
    ngraph->iadjwgt = gk_i32malloc(graph->xadj[nvtxs], "gk_graph_Reorder: iadjwgt");
  if (graph->fadjwgt)
    ngraph->fadjwgt = gk_fmalloc(graph->xadj[nvtxs], "gk_graph_Reorder: fadjwgt");

  /* create perm/iperm if not provided; each is the inverse of the other */
  if (perm == NULL) {
    freeperm = 1;
    perm = gk_i32malloc(nvtxs, "gk_graph_Reorder: perm");
    for (i=0; i<nvtxs; i++)
      perm[iperm[i]] = i;
  }
  if (iperm == NULL) {
    freeiperm = 1;
    iperm = gk_i32malloc(nvtxs, "gk_graph_Reorder: iperm");
    for (i=0; i<nvtxs; i++)
      iperm[perm[i]] = i;
  }

  /* fill-in the information of the re-ordered graph; v is the new ID,
     u = iperm[v] the old one, jj runs over the new edge positions */
  ngraph->xadj[0] = jj = 0;
  for (v=0; v<nvtxs; v++) {
    u = iperm[v];
    for (j=xadj[u]; j<xadj[u+1]; j++, jj++) {
      ngraph->adjncy[jj] = perm[adjncy[j]];
      if (graph->iadjwgt)
        ngraph->iadjwgt[jj] = graph->iadjwgt[j];
      if (graph->fadjwgt)
        ngraph->fadjwgt[jj] = graph->fadjwgt[j];
    }
    if (graph->ivwgts)
      ngraph->ivwgts[v] = graph->ivwgts[u];
    if (graph->fvwgts)
      ngraph->fvwgts[v] = graph->fvwgts[u];
    if (graph->ivsizes)
      ngraph->ivsizes[v] = graph->ivsizes[u];
    if (graph->fvsizes)
      ngraph->fvsizes[v] = graph->fvsizes[u];
    if (graph->vlabels)
      ngraph->vlabels[v] = graph->vlabels[u];

    ngraph->xadj[v+1] = jj;
  }

  /* free memory */
  if (freeperm)
    gk_free((void **)&perm, LTERM);
  if (freeiperm)
    gk_free((void **)&iperm, LTERM);

  return ngraph;
}
/*************************************************************************/
/*! This function finds the connected components in a graph.
\param graph is the graph structure
\param cptr is the ptr structure of the CSR representation of the
components. The length of this vector must be graph->nvtxs+1.
\param cind is the indices structure of the CSR representation of
the components. The length of this vector must be graph->nvtxs.
\returns the number of components that it found.
\note The cptr and cind parameters can be NULL, in which case only the
number of connected components is returned.
*/
/*************************************************************************/
/*
 * Find the connected components via BFS over the todo/pos worklists
 * (see the doxygen block above for the cptr/cind CSR contract).
 * NOTE(review): when cptr is NULL, BOTH arrays are allocated locally;
 * a non-NULL cptr with a NULL cind is not handled — verify callers
 * always pass the two together.
 */
int gk_graph_FindComponents(gk_graph_t *graph, int32_t *cptr, int32_t *cind)
{
  ssize_t i, ii, j, jj, k, nvtxs, first, last, ntodo, ncmps;
  ssize_t *xadj;
  int32_t *adjncy, *pos, *todo;
  int32_t mustfree_ccsr=0, mustfree_where=0;

  nvtxs  = graph->nvtxs;
  xadj   = graph->xadj;
  adjncy = graph->adjncy;

  /* Deal with NULL supplied cptr/cind vectors */
  if (cptr == NULL) {
    cptr = gk_i32malloc(nvtxs+1, "gk_graph_FindComponents: cptr");
    cind = gk_i32malloc(nvtxs, "gk_graph_FindComponents: cind");
    mustfree_ccsr = 1;
  }

  /* The list of vertices that have not been touched yet.
     The valid entries are from [0..ntodo). */
  todo = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_FindComponents: todo"));

  /* For a vertex that has not been visited, pos[i] is the position in the
     todo list that this vertex is stored.
     If a vertex has been visited, pos[i] = -1. */
  pos = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_FindComponents: pos"));

  /* Find the connected componends */
  ncmps = -1;           /* pre-incremented before each component start */
  ntodo = nvtxs;        /* All vertices have not been visited */
  first = last = 0;     /* Point to the first and last vertices that have been touched
                           but not explored.
                           These vertices are stored in cind[first]...cind[last-1]. */
  while (ntodo > 0) {
    if (first == last) { /* Find another starting vertex */
      cptr[++ncmps] = first;  /* Mark the end of the current CC */
      ASSERT(pos[todo[0]] != -1);
      i = todo[0];

      cind[last++] = i;
      pos[i] = -1;
    }

    i = cind[first++];  /* Get the first visited but unexplored vertex */

    /* Remove i from the todo list and put the last item in the todo
       list at the position that i was so that the todo list will be
       consequtive. The pos[] array is updated accordingly to keep track
       the location of the vertices in the todo[] list. */
    k = pos[i];
    j = todo[k] = todo[--ntodo];
    pos[j] = k;

    /* visit i's unvisited neighbors and append them to the open list */
    for (j=xadj[i]; j<xadj[i+1]; j++) {
      k = adjncy[j];
      if (pos[k] != -1) {
        cind[last++] = k;
        pos[k] = -1;
      }
    }
  }
  cptr[++ncmps] = first;  /* close the last component */

  if (mustfree_ccsr)
    gk_free((void **)&cptr, &cind, LTERM);

  gk_free((void **)&pos, &todo, LTERM);

  return (int) ncmps;
}
/*************************************************************************/
/*! This function computes a permutation of the vertices based on a
breadth-first-traversal. It can be used for re-ordering the graph
to reduce its bandwidth for better cache locality.
The algorithm used is a simplified version of the method used to find
the connected components.
\param[IN] graph is the graph structure
\param[IN] v is the starting vertex of the BFS
\param[OUT] perm[i] stores the ID of vertex i in the re-ordered graph.
\param[OUT] iperm[i] stores the ID of the vertex that corresponds to
the ith vertex in the re-ordered graph.
\note The perm or iperm (but not both) can be NULL, at which point,
the corresponding arrays are not returned. Though the program
works fine when both are NULL, doing that is not smart.
The returned arrays should be freed with gk_free().
*/
/*************************************************************************/
/*
 * Compute a BFS vertex ordering starting from v (see the doxygen block
 * above for the r_perm/r_iperm contract). The cot array serves as
 * closed list, open list and todo list at once; pos tracks where each
 * unvisited vertex currently sits inside cot (-1 once visited).
 */
void gk_graph_ComputeBFSOrdering(gk_graph_t *graph, int v, int32_t **r_perm,
          int32_t **r_iperm)
{
  ssize_t j, *xadj;
  int i, k, nvtxs, first, last;
  int32_t *adjncy, *cot, *pos;

  if (graph->nvtxs <= 0)
    return;

  nvtxs  = graph->nvtxs;
  xadj   = graph->xadj;
  adjncy = graph->adjncy;

  /* This array will function like pos + touched of the CC method */
  pos = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_ComputeBFSOrdering: pos"));

  /* This array ([C]losed[O]pen[T]odo => cot) serves three purposes.
     Positions from [0...first) is the current iperm[] vector of the explored vertices;
     Positions from [first...last) is the OPEN list (i.e., visited vertices);
     Positions from [last...nvtxs) is the todo list. */
  cot = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_ComputeBFSOrdering: cot"));

  /* put v at the front of the todo list (swap slots 0 and v in cot,
     updating pos to match; a no-op when v == 0) */
  pos[0] = cot[0] = v;
  pos[v] = cot[v] = 0;

  /* Find the connected componends induced by the partition */
  first = last = 0;
  while (first < nvtxs) {
    if (first == last) { /* Find another starting vertex */
      k = cot[last];
      ASSERT(pos[k] != -1);
      pos[k] = -1; /* mark node as being visited */
      last++;
    }

    i = cot[first++];  /* the ++ advances the explored vertices */
    for (j=xadj[i]; j<xadj[i+1]; j++) {
      k = adjncy[j];
      /* if a node has already been visited, its perm[] will be -1 */
      if (pos[k] != -1) {
        /* pos[k] is the location within iperm of where k resides (it is in the 'todo' part);
           It is placed in that location cot[last] (end of OPEN list) that we
           are about to overwrite and update pos[cot[last]] to reflect that. */

        cot[pos[k]]    = cot[last]; /* put the head of the todo list to
                                       where k was in the todo list */
        pos[cot[last]] = pos[k];    /* update perm to reflect the move */
        cot[last++]    = k;         /* put node at the end of the OPEN list */
        pos[k]         = -1;        /* mark node as being visited */
      }
    }
  }

  /* time to decide what to return */
  if (r_perm != NULL) {
    /* use the 'pos' array to build the perm array */
    for (i=0; i<nvtxs; i++)
      pos[cot[i]] = i;

    *r_perm = pos;
    pos = NULL;
  }

  if (r_iperm != NULL) {
    *r_iperm = cot;
    cot = NULL;
  }

  /* cleanup memory */
  gk_free((void **)&pos, &cot, LTERM);
}
/*************************************************************************/
/*! This function computes a permutation of the vertices based on a
best-first-traversal. It can be used for re-ordering the graph
to reduce its bandwidth for better cache locality.
\param[IN] graph is the graph structure.
\param[IN] v is the starting vertex of the best-first traversal.
\param[IN] type indicates the criteria to use to measure the 'bestness'
of a vertex.
\param[OUT] perm[i] stores the ID of vertex i in the re-ordered graph.
\param[OUT] iperm[i] stores the ID of the vertex that corresponds to
the ith vertex in the re-ordered graph.
\note The perm or iperm (but not both) can be NULL, at which point,
the corresponding arrays are not returned. Though the program
works fine when both are NULL, doing that is not smart.
The returned arrays should be freed with gk_free().
*/
/*************************************************************************/
void gk_graph_ComputeBestFOrdering0(gk_graph_t *graph, int v, int type,
          int32_t **r_perm, int32_t **r_iperm)
{
  ssize_t j, jj, *xadj;
  int i, k, u, nvtxs;
  int32_t *adjncy, *perm, *degrees, *minIDs, *open;
  gk_i32pq_t *queue;

  if (graph->nvtxs <= 0)
    return;

  nvtxs  = graph->nvtxs;
  xadj   = graph->xadj;
  adjncy = graph->adjncy;

  /* the degree of the vertices in the closed list */
  degrees = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: degrees");

  /* the minimum vertex ID of an open vertex to the closed list */
  minIDs = gk_i32smalloc(nvtxs, nvtxs+1, "gk_graph_ComputeBestFOrdering: minIDs");

  /* the open list */
  open = gk_i32malloc(nvtxs, "gk_graph_ComputeBestFOrdering: open");

  /* if perm[i] >= 0, then perm[i] is the order of vertex i;
     otherwise perm[i] == -1.
   */
  perm = gk_i32smalloc(nvtxs, -1, "gk_graph_ComputeBestFOrdering: perm");

  /* create the queue and put everything in it; the starting vertex gets a
     higher priority so it is extracted first */
  queue = gk_i32pqCreate(nvtxs);
  for (i=0; i<nvtxs; i++)
    gk_i32pqInsert(queue, i, 0);
  gk_i32pqUpdate(queue, v, 1);

  open[0] = v;

  /* start processing the nodes; each iteration closes exactly one vertex */
  for (i=0; i<nvtxs; i++) {
    if ((v = gk_i32pqGetTop(queue)) == -1)
      gk_errexit(SIGERR, "The priority queue got empty ahead of time [i=%d].\n", i);

    if (perm[v] != -1)
      gk_errexit(SIGERR, "The perm[%d] has already been set.\n", v);
    perm[v] = i;

    /* update the priority of every still-unordered neighbor of v according
       to the selected 'bestness' criterion */
    for (j=xadj[v]; j<xadj[v+1]; j++) {
      u = adjncy[j];
      if (perm[u] == -1) {
        degrees[u]++;
        minIDs[u] = (i < minIDs[u] ? i : minIDs[u]);

        switch (type) {
          case 1: /* DFS */
            gk_i32pqUpdate(queue, u, 1);
            break;

          case 2: /* Max in closed degree */
            gk_i32pqUpdate(queue, u, degrees[u]);
            break;

          case 3: /* Sum of orders in closed list */
            for (k=0, jj=xadj[u]; jj<xadj[u+1]; jj++) {
              if (perm[adjncy[jj]] != -1)
                k += perm[adjncy[jj]];
            }
            gk_i32pqUpdate(queue, u, k);
            break;

          case 4: /* Sum of order-differences (w.r.t. current number) in closed
                     list (updated once in a while) */
            for (k=0, jj=xadj[u]; jj<xadj[u+1]; jj++) {
              if (perm[adjncy[jj]] != -1)
                k += (i-perm[adjncy[jj]]);
            }
            gk_i32pqUpdate(queue, u, k);
            break;

          default:
            ;
        }
      }
    }
  }

  /* time to decide what to return.
     BUGFIX: build iperm *before* handing perm to the caller; the previous
     code set perm=NULL first and then dereferenced perm[] here, crashing
     whenever both r_perm and r_iperm were non-NULL (the documented common
     case). */
  if (r_iperm != NULL) {
    /* use the 'degrees' array to build the iperm array */
    for (i=0; i<nvtxs; i++)
      degrees[perm[i]] = i;
    *r_iperm = degrees;
    degrees = NULL;
  }

  if (r_perm != NULL) {
    *r_perm = perm;
    perm = NULL;
  }

  /* cleanup memory; arrays handed to the caller were set to NULL above and
     are thus skipped by gk_free() */
  gk_i32pqDestroy(queue);
  gk_free((void **)&perm, &degrees, &minIDs, &open, LTERM);
}
/*************************************************************************/
/*! This function computes a permutation of the vertices based on a
best-first-traversal. It can be used for re-ordering the graph
to reduce its bandwidth for better cache locality.
\param[IN] graph is the graph structure.
\param[IN] v is the starting vertex of the best-first traversal.
\param[IN] type indicates the criteria to use to measure the 'bestness'
of a vertex.
\param[OUT] perm[i] stores the ID of vertex i in the re-ordered graph.
\param[OUT] iperm[i] stores the ID of the vertex that corresponds to
the ith vertex in the re-ordered graph.
\note The perm or iperm (but not both) can be NULL, at which point,
the corresponding arrays are not returned. Though the program
works fine when both are NULL, doing that is not smart.
The returned arrays should be freed with gk_free().
*/
/*************************************************************************/
void gk_graph_ComputeBestFOrdering(gk_graph_t *graph, int v, int type,
          int32_t **r_perm, int32_t **r_iperm)
{
  ssize_t j, jj, *xadj;
  int i, k, u, nvtxs, nopen, ntodo;
  int32_t *adjncy, *perm, *degrees, *wdegrees, *sod, *level, *ot, *pos;
  gk_i32pq_t *queue;

  if (graph->nvtxs <= 0)
    return;

  nvtxs  = graph->nvtxs;
  xadj   = graph->xadj;
  adjncy = graph->adjncy;

  /* the degree of the vertices in the closed list */
  degrees = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: degrees");

  /* the weighted degree of the vertices in the closed list for type==3 */
  wdegrees = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: wdegrees");

  /* the sum of differences for type==4 */
  sod = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: sod");

  /* the encountering level of a vertex for type==5 */
  level = gk_i32smalloc(nvtxs, 0, "gk_graph_ComputeBestFOrdering: level");

  /* The open+todo list of vertices.
     The vertices in [0..nopen) are the open vertices.
     The vertices in [nopen..ntodo) are the todo vertices.
   */
  ot = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_FindComponents: ot"));

  /* For a vertex that has not been explored, pos[i] is the position in the ot list. */
  pos = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_graph_FindComponents: pos"));

  /* if perm[i] >= 0, then perm[i] is the order of vertex i; otherwise perm[i] == -1. */
  perm = gk_i32smalloc(nvtxs, -1, "gk_graph_ComputeBestFOrdering: perm");

  /* create the queue and put the starting vertex in it */
  queue = gk_i32pqCreate(nvtxs);
  gk_i32pqInsert(queue, v, 1);

  /* put v at the front of the open list by swapping the identity-list
     entries of vertices 0 and v */
  pos[0] = ot[0] = v;
  pos[v] = ot[v] = 0;
  nopen = 1;
  ntodo = nvtxs;

  /* start processing the nodes; each iteration closes exactly one vertex */
  for (i=0; i<nvtxs; i++) {
    if (nopen == 0) { /* deal with non-connected graphs */
      gk_i32pqInsert(queue, ot[0], 1);
      nopen++;
    }

    if ((v = gk_i32pqGetTop(queue)) == -1)
      gk_errexit(SIGERR, "The priority queue got empty ahead of time [i=%d].\n", i);

    if (perm[v] != -1)
      gk_errexit(SIGERR, "The perm[%d] has already been set.\n", v);
    perm[v] = i;

    if (ot[pos[v]] != v)
      gk_errexit(SIGERR, "Something went wrong [ot[pos[%d]]!=%d.\n", v, v);
    if (pos[v] >= nopen)
      gk_errexit(SIGERR, "The position of v is not in open list. pos[%d]=%d is >=%d.\n", v, pos[v], nopen);

    /* remove v from the open list and re-arrange the todo part of the list */
    ot[pos[v]] = ot[nopen-1];
    pos[ot[nopen-1]] = pos[v];
    if (ntodo > nopen) {
      ot[nopen-1] = ot[ntodo-1];
      pos[ot[ntodo-1]] = nopen-1;
    }
    nopen--;
    ntodo--;

    for (j=xadj[v]; j<xadj[v+1]; j++) {
      u = adjncy[j];
      if (perm[u] == -1) {
        /* update ot list, if u is not in the open list by putting it at the end
           of the open list. */
        if (degrees[u] == 0) {
          ot[pos[u]] = ot[nopen];
          pos[ot[nopen]] = pos[u];
          ot[nopen] = u;
          pos[u] = nopen;
          nopen++;

          level[u] = level[v]+1;
          gk_i32pqInsert(queue, u, 0);
        }

        /* update the in-closed degree */
        degrees[u]++;

        /* update the queues based on the type */
        switch (type) {
          case 1: /* DFS */
            gk_i32pqUpdate(queue, u, 1000*(i+1)+degrees[u]);
            break;

          case 2: /* Max in closed degree */
            gk_i32pqUpdate(queue, u, degrees[u]);
            break;

          case 3: /* Sum of orders in closed list */
            wdegrees[u] += i;
            gk_i32pqUpdate(queue, u, wdegrees[u]);
            break;

          case 4: /* Sum of order-differences */
            /* this is handled at the end of the loop */
            ;
            break;

          case 5: /* BFS with in degree priority */
            gk_i32pqUpdate(queue, u, -(1000*level[u] - degrees[u]));
            break;

          case 6: /* Hybrid of 1+2 */
            gk_i32pqUpdate(queue, u, (i+1)*degrees[u]);
            break;

          default:
            ;
        }
      }
    }

    if (type == 4) { /* update all the vertices in the open list */
      for (j=0; j<nopen; j++) {
        u = ot[j];
        if (perm[u] != -1)
          gk_errexit(SIGERR, "For i=%d, the open list contains a closed vertex: ot[%zd]=%d, perm[%d]=%d.\n", i, j, u, u, perm[u]);
        sod[u] += degrees[u];
        if (i<1000 || i%25==0)
          gk_i32pqUpdate(queue, u, sod[u]);
      }
    }

    /*
    for (j=0; j<ntodo; j++) {
      if (pos[ot[j]] != j)
        gk_errexit(SIGERR, "pos[ot[%zd]] != %zd.\n", j, j);
    }
    */
  }

  /* time to decide what to return.
     BUGFIX: build iperm *before* handing perm to the caller; the previous
     code set perm=NULL first and then read perm[] here, crashing whenever
     both r_perm and r_iperm were non-NULL (the documented common case). */
  if (r_iperm != NULL) {
    /* use the 'degrees' array to build the iperm array */
    for (i=0; i<nvtxs; i++)
      degrees[perm[i]] = i;
    *r_iperm = degrees;
    degrees = NULL;
  }

  if (r_perm != NULL) {
    *r_perm = perm;
    perm = NULL;
  }

  /* cleanup memory; arrays handed to the caller were set to NULL above and
     are thus skipped by gk_free() */
  gk_i32pqDestroy(queue);
  gk_free((void **)&perm, &degrees, &wdegrees, &sod, &ot, &pos, &level, LTERM);
}
/*************************************************************************/
/*! This function computes the single-source shortest path lengths from the
    root node to all the other nodes in the graph. If the graph is not
    connected, then the shortest path to the vertices in the other components
    is -1.
    \param[IN] graph is the graph structure.
    \param[IN] v is the root of the single-source shortest path computations.
    \param[IN] type indicates the criteria to use to measure the 'bestness'
           of a vertex.
    \param[OUT] sps[i] stores the length of the shortest path from v to vertex i.
           If no such path exists, then it is -1. Note that the returned
           array will be either an array of int32_t or an array of floats.
           The specific type is determined by the existence of non-NULL
           iadjwgt and fadjwgt arrays. If both of these arrays exist, then
           priority is given to iadjwgt.
    \note The returned array should be freed with gk_free().
*/
/*************************************************************************/
void gk_graph_SingleSourceShortestPaths(gk_graph_t *graph, int v, void **r_sps)
{
  ssize_t j, *xadj;
  int u, nvtxs;
  int32_t *adjncy, *inqueue;

  if (graph->nvtxs <= 0)
    return;

  nvtxs  = graph->nvtxs;
  xadj   = graph->xadj;
  adjncy = graph->adjncy;

  /* 0: never seen, 1: currently in the queue, 2: finalized */
  inqueue = gk_i32smalloc(nvtxs, 0, "gk_graph_SingleSourceShortestPaths: inqueue");

  /* determine if you will be computing using int32_t or float and proceed
     from there */
  if (graph->iadjwgt != NULL) {
    gk_i32pq_t *queue;
    int32_t *adjwgt;
    int32_t *sps;

    adjwgt = graph->iadjwgt;

    queue = gk_i32pqCreate(nvtxs);
    gk_i32pqInsert(queue, v, 0);
    inqueue[v] = 1;

    /* -1 marks "not reached (yet)" */
    sps = gk_i32smalloc(nvtxs, -1, "gk_graph_SingleSourceShortestPaths: sps");
    sps[v] = 0;

    /* Dijkstra-style processing; the priority queue is fed negated
       distances so the closest vertex is extracted first */
    while ((v = gk_i32pqGetTop(queue)) != -1) {
      inqueue[v] = 2;

      /* relax the adjacent edges.
         BUGFIX: the edge index is ssize_t (the type of xadj[] values)
         instead of int, so graphs with more than INT_MAX edges do not
         silently truncate the index. */
      for (j=xadj[v]; j<xadj[v+1]; j++) {
        u = adjncy[j];
        if (inqueue[u] == 2)
          continue;
        if (sps[u] < 0 || sps[v]+adjwgt[j] < sps[u]) {
          sps[u] = sps[v]+adjwgt[j];

          if (inqueue[u])
            gk_i32pqUpdate(queue, u, -sps[u]);
          else {
            gk_i32pqInsert(queue, u, -sps[u]);
            inqueue[u] = 1;
          }
        }
      }
    }

    *r_sps = (void *)sps;
    gk_i32pqDestroy(queue);
  }
  else {
    gk_fpq_t *queue;
    float *adjwgt;
    float *sps;

    adjwgt = graph->fadjwgt;

    queue = gk_fpqCreate(nvtxs);
    gk_fpqInsert(queue, v, 0);
    inqueue[v] = 1;

    /* -1 marks "not reached (yet)" */
    sps = gk_fsmalloc(nvtxs, -1, "gk_graph_SingleSourceShortestPaths: sps");
    sps[v] = 0;

    /* same algorithm as above, but with float weights/distances */
    while ((v = gk_fpqGetTop(queue)) != -1) {
      inqueue[v] = 2;

      /* relax the adjacent edges (ssize_t edge index, as above) */
      for (j=xadj[v]; j<xadj[v+1]; j++) {
        u = adjncy[j];
        if (inqueue[u] == 2)
          continue;
        if (sps[u] < 0 || sps[v]+adjwgt[j] < sps[u]) {
          sps[u] = sps[v]+adjwgt[j];

          if (inqueue[u])
            gk_fpqUpdate(queue, u, -sps[u]);
          else {
            gk_fpqInsert(queue, u, -sps[u]);
            inqueue[u] = 1;
          }
        }
      }
    }

    *r_sps = (void *)sps;
    gk_fpqDestroy(queue);
  }

  gk_free((void **)&inqueue, LTERM);
}
#ifdef XXX
/*************************************************************************/
/*! Sorts the adjacency lists in increasing vertex order
\param graph the graph itself,
*/
/**************************************************************************/
/* NOTE(review): dead code — this function lives inside '#ifdef XXX'.  As
   written it cannot compile: 'what' is used but never declared as a
   parameter, and it accesses rowptr/rowind/rowval/colptr/... fields that the
   live xadj/adjncy-based gk_graph_t code above does not show.  Confirm the
   struct layout and add a 'what' parameter before enabling. */
void gk_graph_SortAdjacencies(gk_graph_t *graph)
{
  int n, nn=0;
  ssize_t *ptr;
  int *ind;
  float *val;

  /* select the row- or column-based view of the structure */
  switch (what) {
    case GK_CSR_ROW:
      if (!graph->rowptr)
        gk_errexit(SIGERR, "Row-based view of the graphrix does not exists.\n");
      n   = graph->nrows;
      ptr = graph->rowptr;
      ind = graph->rowind;
      val = graph->rowval;
      break;
    case GK_CSR_COL:
      if (!graph->colptr)
        gk_errexit(SIGERR, "Column-based view of the graphrix does not exists.\n");
      n   = graph->ncols;
      ptr = graph->colptr;
      ind = graph->colind;
      val = graph->colval;
      break;
    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  #pragma omp parallel if (n > 100)
  {
    ssize_t i, j, k;
    gk_ikv_t *cand;
    float *tval;

    /* nn = length of the longest adjacency list; computed once by a single
       thread (the implicit barrier at the end of 'single' publishes nn to
       the other threads before the allocations below) */
    #pragma omp single
    for (i=0; i<n; i++)
      nn = gk_max(nn, ptr[i+1]-ptr[i]);

    /* per-thread scratch buffers sized for the longest list */
    cand = gk_ikvmalloc(nn, "gk_graph_SortIndices: cand");
    tval = gk_fmalloc(nn, "gk_graph_SortIndices: tval");

    #pragma omp for schedule(static)
    for (i=0; i<n; i++) {
      /* k becomes 1 if list i has at least one out-of-order pair */
      for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
        if (j > ptr[i] && ind[j] < ind[j-1])
          k = 1; /* an inversion */
        cand[j-ptr[i]].val = j-ptr[i];
        cand[j-ptr[i]].key = ind[j];
        tval[j-ptr[i]] = val[j];
      }
      if (k) {
        /* sort by index and permute the values to match */
        gk_ikvsorti(ptr[i+1]-ptr[i], cand);
        for (j=ptr[i]; j<ptr[i+1]; j++) {
          ind[j] = cand[j-ptr[i]].key;
          val[j] = tval[cand[j-ptr[i]].val];
        }
      }
    }

    gk_free((void **)&cand, &tval, LTERM);
  }
}
/*************************************************************************/
/*! Returns a subgraph containing a certain set of rows.
    \param graph is the original graph.
    \param nrows is the number of rows to extract.
    \param rind is the set of row numbers to extract.
    \returns the row structure of the newly created subgraph.
*/
/**************************************************************************/
/* NOTE(review): dead code inside '#ifdef XXX'; uses rowptr/rowind/rowval
   fields that the live gk_graph_t code above does not show — confirm the
   struct layout before enabling.  The allocation labels still say
   "ExtractPartition" (copy/paste leftover). */
gk_graph_t *gk_graph_ExtractRows(gk_graph_t *graph, int nrows, int *rind)
{
  ssize_t i, ii, j, nnz;
  gk_graph_t *ngraph;

  ngraph = gk_graph_Create();

  /* the extracted structure keeps the original column space */
  ngraph->nrows = nrows;
  ngraph->ncols = graph->ncols;

  /* first pass: count the non-zeros of the selected rows */
  for (nnz=0, i=0; i<nrows; i++)
    nnz += graph->rowptr[rind[i]+1]-graph->rowptr[rind[i]];

  ngraph->rowptr = gk_zmalloc(ngraph->nrows+1, "gk_graph_ExtractPartition: rowptr");
  ngraph->rowind = gk_imalloc(nnz, "gk_graph_ExtractPartition: rowind");
  ngraph->rowval = gk_fmalloc(nnz, "gk_graph_ExtractPartition: rowval");

  /* second pass: copy the index/value lists of each selected row, in the
     order given by rind[] */
  ngraph->rowptr[0] = 0;
  for (nnz=0, j=0, ii=0; ii<nrows; ii++) {
    i = rind[ii];
    gk_icopy(graph->rowptr[i+1]-graph->rowptr[i], graph->rowind+graph->rowptr[i], ngraph->rowind+nnz);
    gk_fcopy(graph->rowptr[i+1]-graph->rowptr[i], graph->rowval+graph->rowptr[i], ngraph->rowval+nnz);
    nnz += graph->rowptr[i+1]-graph->rowptr[i];
    ngraph->rowptr[++j] = nnz;
  }
  ASSERT(j == ngraph->nrows);

  return ngraph;
}
/*************************************************************************/
/*! Returns a subgraph corresponding to a specified partitioning of rows.
    \param graph is the original graph.
    \param part is the partitioning vector of the rows.
    \param pid is the partition ID that will be extracted.
    \returns the row structure of the newly created subgraph.
*/
/**************************************************************************/
/* NOTE(review): dead code inside '#ifdef XXX'; uses rowptr/rowind/rowval
   fields — confirm against the actual gk_graph_t before enabling. */
gk_graph_t *gk_graph_ExtractPartition(gk_graph_t *graph, int *part, int pid)
{
  ssize_t i, j, nnz;
  gk_graph_t *ngraph;

  ngraph = gk_graph_Create();

  ngraph->nrows = 0;
  ngraph->ncols = graph->ncols;

  /* first pass: count the rows and non-zeros that belong to partition pid */
  for (nnz=0, i=0; i<graph->nrows; i++) {
    if (part[i] == pid) {
      ngraph->nrows++;
      nnz += graph->rowptr[i+1]-graph->rowptr[i];
    }
  }

  ngraph->rowptr = gk_zmalloc(ngraph->nrows+1, "gk_graph_ExtractPartition: rowptr");
  ngraph->rowind = gk_imalloc(nnz, "gk_graph_ExtractPartition: rowind");
  ngraph->rowval = gk_fmalloc(nnz, "gk_graph_ExtractPartition: rowval");

  /* second pass: copy the selected rows, preserving their relative order */
  ngraph->rowptr[0] = 0;
  for (nnz=0, j=0, i=0; i<graph->nrows; i++) {
    if (part[i] == pid) {
      gk_icopy(graph->rowptr[i+1]-graph->rowptr[i], graph->rowind+graph->rowptr[i], ngraph->rowind+nnz);
      gk_fcopy(graph->rowptr[i+1]-graph->rowptr[i], graph->rowval+graph->rowptr[i], ngraph->rowval+nnz);
      nnz += graph->rowptr[i+1]-graph->rowptr[i];
      ngraph->rowptr[++j] = nnz;
    }
  }
  ASSERT(j == ngraph->nrows);

  return ngraph;
}
/*************************************************************************/
/*! Splits the graph into multiple sub-graphs based on the provided
    color array.
    \param graph is the original graph.
    \param color is an array of size equal to the number of non-zeros
           in the graph (row-wise structure). The graph is split into
           as many parts as the number of colors. For meaningful results,
           the colors should be numbered consecutively starting from 0.
    \returns an array of graphs, one for each supplied color number.
*/
/**************************************************************************/
/* NOTE(review): dead code inside '#ifdef XXX'; uses the rowptr/rowind/rowval
   view and the MAKECSR/SHIFTCSR helpers — confirm the struct layout before
   enabling. */
gk_graph_t **gk_graph_Split(gk_graph_t *graph, int *color)
{
  ssize_t i, j;
  int nrows, ncolors;
  ssize_t *rowptr;
  int *rowind;
  float *rowval;
  gk_graph_t **sgraphs;

  nrows  = graph->nrows;
  rowptr = graph->rowptr;
  rowind = graph->rowind;
  rowval = graph->rowval;

  /* number of parts = max color over all non-zeros, plus one (colors are
     assumed to be numbered consecutively from 0) */
  ncolors = gk_imax(rowptr[nrows], color)+1;

  sgraphs = (gk_graph_t **)gk_malloc(sizeof(gk_graph_t *)*ncolors, "gk_graph_Split: sgraphs");

  /* each sub-graph keeps the full row/column space of the original */
  for (i=0; i<ncolors; i++) {
    sgraphs[i] = gk_graph_Create();
    sgraphs[i]->nrows = graph->nrows;
    sgraphs[i]->ncols = graph->ncols;
    sgraphs[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_graph_Split: sgraphs[i]->rowptr");
  }

  /* count the per-row non-zeros of each color */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      sgraphs[color[j]]->rowptr[i]++;
  }
  /* turn the counts into CSR offsets */
  for (i=0; i<ncolors; i++)
    MAKECSR(j, nrows, sgraphs[i]->rowptr);

  for (i=0; i<ncolors; i++) {
    sgraphs[i]->rowind = gk_imalloc(sgraphs[i]->rowptr[nrows], "gk_graph_Split: sgraphs[i]->rowind");
    sgraphs[i]->rowval = gk_fmalloc(sgraphs[i]->rowptr[nrows], "gk_graph_Split: sgraphs[i]->rowval");
  }

  /* scatter the entries; rowptr[i] doubles as a running fill pointer ... */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      sgraphs[color[j]]->rowind[sgraphs[color[j]]->rowptr[i]] = rowind[j];
      sgraphs[color[j]]->rowval[sgraphs[color[j]]->rowptr[i]] = rowval[j];
      sgraphs[color[j]]->rowptr[i]++;
    }
  }
  /* ... and SHIFTCSR restores the proper offsets afterwards */
  for (i=0; i<ncolors; i++)
    SHIFTCSR(j, nrows, sgraphs[i]->rowptr);

  return sgraphs;
}
/*************************************************************************/
/*! Prunes certain rows/columns of the graph. The pruning takes place
    by analyzing the row structure of the graph. The pruning takes place
    by removing rows/columns but it does not affect the numbering of the
    remaining rows/columns.
    \param graph the graph to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
           of the graph will be pruned,
    \param minf is the minimum number of rows (columns) that a column (row) must
           be present in order to be kept,
    \param maxf is the maximum number of rows (columns) that a column (row) must
           be present at in order to be kept.
    \returns the pruned graph consisting only of its row-based structure.
           The input graph is not modified.
*/
/**************************************************************************/
/* NOTE(review): dead code inside '#ifdef XXX'; uses rowptr/rowind/rowval
   fields — confirm against the actual gk_graph_t before enabling. */
gk_graph_t *gk_graph_Prune(gk_graph_t *graph, int what, int minf, int maxf)
{
  ssize_t i, j, nnz;
  int nrows, ncols;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind, *collen;
  float *rowval, *nrowval;
  gk_graph_t *ngraph;

  ngraph = gk_graph_Create();

  nrows = ngraph->nrows = graph->nrows;
  ncols = ngraph->ncols = graph->ncols;

  rowptr = graph->rowptr;
  rowind = graph->rowind;
  rowval = graph->rowval;

  /* the pruned structure can be no larger than the input; allocate for the
     worst case up front */
  nrowptr = ngraph->rowptr = gk_zmalloc(nrows+1, "gk_graph_Prune: nrowptr");
  nrowind = ngraph->rowind = gk_imalloc(rowptr[nrows], "gk_graph_Prune: nrowind");
  nrowval = ngraph->rowval = gk_fmalloc(rowptr[nrows], "gk_graph_Prune: nrowval");

  switch (what) {
    case GK_CSR_COL:
      /* count the number of rows in which each column appears */
      collen = gk_ismalloc(ncols, 0, "gk_graph_Prune: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          ASSERT(rowind[j] < ncols);
          collen[rowind[j]]++;
        }
      }
      /* turn the counts into keep(1)/drop(0) flags per column */
      for (i=0; i<ncols; i++)
        collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0);

      /* copy only the entries whose column survived */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (collen[rowind[j]]) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
            nnz++;
          }
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&collen, LTERM);
      break;

    case GK_CSR_ROW:
      /* keep only rows whose length is within [minf, maxf]; dropped rows
         become empty but retain their numbering */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
          }
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_graph_Free(&ngraph);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return ngraph;
}
/*************************************************************************/
/*! Normalizes the rows/columns of the graph to be unit
    length.
    \param graph the graph itself,
    \param what indicates what will be normalized and is obtained by
           specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL.
    \param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm
*/
/**************************************************************************/
/* NOTE(review): dead code inside '#ifdef XXX'; operates on the
   rowptr/rowval (and colptr/colval) view of the structure — confirm
   against the actual gk_graph_t before enabling. */
void gk_graph_Normalize(gk_graph_t *graph, int what, int norm)
{
  ssize_t i, j;
  int n;
  ssize_t *ptr;
  float *val, sum;

  if (what&GK_CSR_ROW && graph->rowval) {
    n   = graph->nrows;
    ptr = graph->rowptr;
    val = graph->rowval;

    #pragma omp parallel if (ptr[n] > OMPMINOPS)
    {
      #pragma omp for private(j,sum) schedule(static)
      for (i=0; i<n; i++) {
        /* accumulate the selected norm of row i */
        for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++){
          if (norm == 2)
            sum += val[j]*val[j];
          else if (norm == 1)
            sum += val[j]; /* assume val[j] > 0 */
        }
        /* scale to unit length; rows with zero norm are left untouched */
        if (sum > 0) {
          if (norm == 2)
            sum=1.0/sqrt(sum);
          else if (norm == 1)
            sum=1.0/sum;
          for (j=ptr[i]; j<ptr[i+1]; j++)
            val[j] *= sum;
        }
      }
    }
  }

  if (what&GK_CSR_COL && graph->colval) {
    n   = graph->ncols;
    ptr = graph->colptr;
    val = graph->colval;

    #pragma omp parallel if (ptr[n] > OMPMINOPS)
    {
      #pragma omp for private(j,sum) schedule(static)
      for (i=0; i<n; i++) {
        /* same as the row case, but over the column view */
        for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++)
          if (norm == 2)
            sum += val[j]*val[j];
          else if (norm == 1)
            sum += val[j];
        if (sum > 0) {
          if (norm == 2)
            sum=1.0/sqrt(sum);
          else if (norm == 1)
            sum=1.0/sum;
          for (j=ptr[i]; j<ptr[i+1]; j++)
            val[j] *= sum;
        }
      }
    }
  }
}
#endif
|
@mropes.nim.c | /* Generated by Nim Compiler v1.0.10 */
/* (c) 2019 Andreas Rumpf */
/* The generated code is subject to the original license. */
#define NIM_INTBITS 64
#include "nimbase.h"
#include <string.h>
#include <stdio.h>
#undef LANGUAGE_C
#undef MIPSEB
#undef MIPSEL
#undef PPC
#undef R3000
#undef R4000
#undef i386
#undef linux
#undef mips
#undef near
#undef far
#undef powerpc
#undef unix
#define nimfr_(x, y)
#define nimln_(x, y)
typedef struct tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA;
typedef struct TNimType TNimType;
typedef struct TNimNode TNimNode;
typedef struct RootObj RootObj;
typedef struct NimStringDesc NimStringDesc;
typedef struct TGenericSeq TGenericSeq;
typedef struct tySequence__WwUFq9cJ2xKRlsAWVEHyPRg tySequence__WwUFq9cJ2xKRlsAWVEHyPRg;
typedef struct tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g;
typedef struct tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w;
typedef struct tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ;
typedef struct tyObject_GcStack__7fytPA5bBsob6See21YMRA tyObject_GcStack__7fytPA5bBsob6See21YMRA;
typedef struct tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg;
typedef struct tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ;
typedef struct tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg;
typedef struct tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw;
typedef struct tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA;
typedef struct tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw;
typedef struct tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw;
typedef struct tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg;
typedef struct tyTuple__ujsjpB2O9cjj3uDHsXbnSzg tyTuple__ujsjpB2O9cjj3uDHsXbnSzg;
typedef struct tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg;
typedef struct tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ;
typedef struct tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg;
typedef tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* tyArray__USLYl0Lpkimm4FABiJ3ldA[4096];
typedef NU8 tyEnum_TNimKind__jIBKr1ejBgsfM33Kxw4j7A;
typedef NU8 tySet_tyEnum_TNimTypeFlag__v8QUszD1sWlSIWZz7mC4bQ;
typedef N_NIMCALL_PTR(void, tyProc__ojoeKfW4VYIm36I9cpDTQIg) (void* p, NI op);
typedef N_NIMCALL_PTR(void*, tyProc__WSm2xU5ARYv9aAR4l0z9c9auQ) (void* p);
struct TNimType {
NI size;
tyEnum_TNimKind__jIBKr1ejBgsfM33Kxw4j7A kind;
tySet_tyEnum_TNimTypeFlag__v8QUszD1sWlSIWZz7mC4bQ flags;
TNimType* base;
TNimNode* node;
void* finalizer;
tyProc__ojoeKfW4VYIm36I9cpDTQIg marker;
tyProc__WSm2xU5ARYv9aAR4l0z9c9auQ deepcopy;
};
typedef NU8 tyEnum_TNimNodeKind__unfNsxrcATrufDZmpBq4HQ;
struct TNimNode {
tyEnum_TNimNodeKind__unfNsxrcATrufDZmpBq4HQ kind;
NI offset;
TNimType* typ;
NCSTRING name;
NI len;
TNimNode** sons;
};
struct RootObj {
TNimType* m_type;
};
struct TGenericSeq {
NI len;
NI reserved;
};
struct NimStringDesc {
TGenericSeq Sup;
NIM_CHAR data[SEQ_DECL_SIZE];
};
struct tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA {
RootObj Sup;
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* left;
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* right;
NI L;
NimStringDesc* data;
};
typedef N_NIMCALL_PTR(void, tyProc__T4eqaYlFJYZUv9aG9b1TV0bQ) (void);
struct tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g {
NI refcount;
TNimType* typ;
};
struct tyObject_GcStack__7fytPA5bBsob6See21YMRA {
void* bottom;
};
struct tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w {
NI len;
NI cap;
tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g** d;
};
typedef tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ* tyArray__SiRwrEKZdLgxqz9a9aoVBglg[512];
typedef NU32 tyArray__BHbOSqU1t9b3Gt7K2c6fQig[24];
typedef tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg* tyArray__N1u1nqOgmuJN9cSZrnMHgOQ[32];
typedef tyArray__N1u1nqOgmuJN9cSZrnMHgOQ tyArray__B6durA4ZCi1xjJvRtyYxMg[24];
typedef tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw* tyArray__lh2A89ahMmYg9bCmpVaplLbA[256];
struct tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA {
tyArray__lh2A89ahMmYg9bCmpVaplLbA data;
};
typedef tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* tyArray__0aOLqZchNi8nWtMTi8ND8w[2];
struct tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw {
tyArray__0aOLqZchNi8nWtMTi8ND8w link;
NI key;
NI upperBound;
NI level;
};
struct tyTuple__ujsjpB2O9cjj3uDHsXbnSzg {
tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg* Field0;
NI Field1;
};
typedef tyTuple__ujsjpB2O9cjj3uDHsXbnSzg tyArray__LzOv2eCDGiceMKQstCLmhw[30];
struct tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg {
NI len;
tyArray__LzOv2eCDGiceMKQstCLmhw chunks;
tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg* next;
};
struct tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg {
NI minLargeObj;
NI maxLargeObj;
tyArray__SiRwrEKZdLgxqz9a9aoVBglg freeSmallChunks;
NU32 flBitmap;
tyArray__BHbOSqU1t9b3Gt7K2c6fQig slBitmap;
tyArray__B6durA4ZCi1xjJvRtyYxMg matrix;
tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw* llmem;
NI currMem;
NI maxMem;
NI freeMem;
NI occ;
NI lastSize;
tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA chunkStarts;
tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* root;
tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* deleted;
tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* last;
tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* freeAvlNodes;
NIM_BOOL locked;
NIM_BOOL blockChunkSizeIncrease;
NI nextChunkSize;
tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw bottomData;
tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg heapLinks;
};
struct tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg {
NI stackScans;
NI cycleCollections;
NI maxThreshold;
NI maxStackSize;
NI maxStackCells;
NI cycleTableSize;
NI64 maxPause;
};
struct tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ {
NI counter;
NI max;
tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg* head;
tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg** data;
};
struct tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ {
tyObject_GcStack__7fytPA5bBsob6See21YMRA stack;
NI cycleThreshold;
NI zctThreshold;
tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w zct;
tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w decStack;
tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w tempStack;
NI recGcLock;
tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg region;
tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg stat;
tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ marked;
tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w additionalRoots;
NI gcThreadId;
};
typedef NU8 tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg;
typedef NIM_CHAR tyArray__9bKy7UA2LOi2vzOViufaW1Q[1024];
struct tySequence__WwUFq9cJ2xKRlsAWVEHyPRg {
TGenericSeq Sup;
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* data[SEQ_DECL_SIZE];
};
N_NIMCALL(void, nimGCvisit)(void* d, NI op);
static N_NIMCALL(void, Marker_tyRef__4hi0XQqK9aLiPuWT9acsXm9aQ)(void* p, NI op);
static N_NIMCALL(void, TM__Vw9cfUOQOae9b9bzZBlucMZQg_3)(void);
N_NIMCALL(void, nimRegisterGlobalMarker)(tyProc__T4eqaYlFJYZUv9aG9b1TV0bQ markerProc);
N_NIMCALL(NimStringDesc*, mnewString)(NI len);
N_LIB_PRIVATE N_NIMCALL(NI, len__9b0YRltzV3kNSE9aQTsG82wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a);
N_NIMCALL(NimStringDesc*, setLengthStr)(NimStringDesc* s, NI newLen);
N_NIMCALL(void*, newSeq)(TNimType* typ, NI len);
static N_INLINE(void, asgnRef)(void** dest, void* src);
static N_INLINE(void, incRef__AT1eRuflKWyTTBdLjEDZbg_3system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c);
static N_INLINE(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*, usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem)(void* usr);
static N_INLINE(void, decRef__AT1eRuflKWyTTBdLjEDZbgsystem)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c);
static N_INLINE(void, rtlAddZCT__AT1eRuflKWyTTBdLjEDZbg_2system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c);
N_LIB_PRIVATE N_NOINLINE(void, addZCT__Y66tOYFjgwJ0k4aLz4bc0Q)(tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w* s, tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c);
static N_INLINE(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem)(tySequence__WwUFq9cJ2xKRlsAWVEHyPRg** s);
N_NIMCALL(TGenericSeq*, setLengthSeqV2)(TGenericSeq* s, TNimType* typ, NI newLen);
N_NIMCALL(void, unsureAsgnRef)(void** dest, void* src);
N_NIMCALL(TGenericSeq*, incrSeqV3)(TGenericSeq* s, TNimType* typ);
static N_INLINE(void, appendString)(NimStringDesc* dest, NimStringDesc* src);
static N_INLINE(void, copyMem__i80o3k0SgEI5gTRCzYdyWAsystem)(void* dest, void* source, NI size);
static N_INLINE(void, nimCopyMem)(void* dest, void* source, NI size);
N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest, NI addlen);
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA)(NimStringDesc* frmt, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0);
N_LIB_PRIVATE N_NIMCALL(void, add__yG4AKzsBRS1W4MANDlXQeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, NimStringDesc* b);
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___Z7W1o5nPSc3ExfO5f7j1Gg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, NimStringDesc* b);
/* Forward declarations for the Nim compiler's `ropes` module (generated C).
 * NOTE(review): this file is Nim-compiler output; identifiers are name-mangled
 * (`name__<sighash>`). Do not edit by hand — regenerate from the Nim sources. */

/* Rope construction/concatenation helpers (RopeObj is the compiler's rope type). */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___ShdZ6VrAQkY0nWR9a39b9bGdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b);
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, newRope__dBdikNFB2Y7QJ9aVJE7dGHg)(NimStringDesc* data);
/* Nim runtime allocation / string refcount primitives. */
N_NIMCALL(void*, newObj)(TNimType* typ, NI size);
N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src);
static N_INLINE(void, nimGCunrefNoCycle)(void* p);
/* String -> rope conversion with an interning cache (see cache__* below). */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, rope__yShmEg9cffWxI7s5XzEKBow)(NimStringDesc* s);
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, insertInCache__yShmEg9cffWxI7s5XzEKBow_2)(NimStringDesc* s);
N_LIB_PRIVATE N_NIMCALL(NI, hash__6PCYkKlCNhq9cnRLnqWKkwQ)(NimStringDesc* x);
/* Inline string/memory comparison helpers used by the cache lookup. */
static N_INLINE(NIM_BOOL, eqStrings)(NimStringDesc* a, NimStringDesc* b);
static N_INLINE(NIM_BOOL, equalMem__9bGgqEk7RXXl6eqM9c1HdELAsystem)(void* a, void* b, NI size);
static N_INLINE(int, nimCmpMem)(void* a, void* b, NI size);
N_LIB_PRIVATE N_NIMCALL(void, add__IM4kcMNkkOLJtqdEqSxR8A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b);
N_LIB_PRIVATE N_NIMCALL(void, failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A)(NimStringDesc* msg);
N_NIMCALL(NimStringDesc*, rawNewString)(NI space);
N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, substr__2yh9cer0ymNRHlOOg8P7IuA)(NimStringDesc* s, NI first, NI last);
N_NIMCALL(NimStringDesc*, nimInt64ToStr)(NI64 x);
/* File I/O: writing ropes to FILE* and comparing a rope against on-disk content. */
N_LIB_PRIVATE N_NIMCALL(void, write__PArlm09bKklm2BLsCg6YtaA)(FILE* f, NimStringDesc* s);
N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, open__gq12VLhVO0NBzUTnGgz4nw)(FILE** f, NimStringDesc* filename, tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg mode, NI bufSize);
N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__9bihNFg7Qajcg9arfx5cr9aHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, FILE* f);
static N_INLINE(void, nimZeroMem)(void* p, NI size);
static N_INLINE(void, nimSetMem__zxfKBYntu9cBapkhrCOk1fgmemory)(void* a, int v, NI size);
N_LIB_PRIVATE N_NIMCALL(NI, readBuffer__Y9atVWUcVyKHG9aBP4D0P9czA_2)(FILE* f, void* buffer, NI len);
static N_INLINE(NCSTRING, nimToCStringConv)(NimStringDesc* s);
N_LIB_PRIVATE N_NIMCALL(void, close__fU6ZlJAtQ9bre04EDZLdGsA_3)(FILE* f);
N_LIB_PRIVATE N_NIMCALL(void, writeRope__FwuzOBq6SLlanVUstm8q9cA)(FILE* f, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r);
N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__Wiam9c8x73Mtmbj0r4Ppikg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, NimStringDesc* filename);
N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, writeRope__LLRRC42xWBSkxzV9bsPu7lA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* head, NimStringDesc* filename);
/* Module-level state for the ropes module (generated; names are mangled).
 * NOTE(review): semantics inferred from names — confirm against the Nim
 * `compiler/ropes.nim` sources before relying on them. */
/* Interning cache array for rope__* / insertInCache__* (presumably maps
 * string hashes to cached ropes — TODO confirm). */
tyArray__USLYl0Lpkimm4FABiJ3ldA cache__WGMp5Wo1NlgbAMOysPIfmQ;
/* Run-time type information (RTTI) records; `extern` ones are defined in
 * another generated translation unit, the rest are defined here. */
extern TNimType NTI__ytyiCJqK439aF9cIibuRVpAg_;
TNimType NTI__OFzf0kSiPTcNreUIeJgWVA_;
extern TNimType NTI__rR5Bzr1D5krxoo1NcNyeMA_;
extern TNimType NTI__77mFvmsOLKik79ci2hXkHEg_;
TNimType NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_;
TNimType NTI__USLYl0Lpkimm4FABiJ3ldA_;
/* Cache statistics counters (tries/misses), zero-initialized at load. */
NI gCacheTries__5GfZTThHPBfB9bjRZdFluBw;
NI gCacheMisses__fLRm9am8S0daYBVNK6JKyBg;
NI gCacheIntTries__opyfsNv023Md1P05mqsDew;
extern TNimType NTI__WwUFq9cJ2xKRlsAWVEHyPRg_;
/* Shared GC heap state defined in the Nim runtime's gc translation unit. */
extern tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ gch__IcYaEuuWivYAS86vFMTS3Q;
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_4, "$", 1);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_5, "ropes.nim(238, 20) `false` invalid format string: ", 50);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_6, "ropes.nim(250, 20) `false` invalid format string: ", 50);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_7, "ropes.nim(253, 20) `false` invalid format string: ", 50);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_8, "\012", 1);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_9, "ropes.nim(263, 18) `false` invalid format string: ", 50);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_10, "[$1, $2, $3]", 12);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_11, "FR_.len-=$1;$n", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_12, "} $1: ;$n", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_13, "}$n", 3);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_14, "FR_.len+=$1;$n", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_15, "void", 4);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_16, ", ", 2);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_17, "$1 $2;$n", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_18, "typedef $1 $2 $2;$n", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_19, "*", 1);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_20, " ", 1);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_21, ", NI $1Len_$2", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_22, " Result", 7);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_23, "$1$2($3, $4)$5", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_24, "(*$1)", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_25, "static TNimType* $1;$n", 22);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_26, "\011$1 = (TNimType*)hcrGetGlobal($2, \"$1\");$n", 42);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_27, "extern TNimType $1;$n", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_28, "NTI$1_", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_29, "$1.size = sizeof($2);$n$1.kind = $3;$n$1.base = $4;$n", 53);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_30, "$1.flags = $2;$n", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_31, "$1.name = $2;$n", 15);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_32, "$1.nextType = nimTypeRoot; nimTypeRoot=&$1;$n", 45);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_33, "\011hcrRegisterGlobal($2, \"$1\", sizeof(TNimType), NULL, (void**)&$"
"1);$n", 68);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_34, "TNimType $1;$n", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_35, "$1[$2]", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_36, "static TNimNode** $1;$n", 23);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_37, "\011hcrRegisterGlobal($3, \"$1\", sizeof(TNimNode*) * $2, NULL, (voi"
"d**)&$1);$n", 74);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_38, "static TNimNode* $1[$2];$n", 26);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_39, "$1[$2] = &$3;$n", 15);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_40, "$1.kind = 1;$n$1.offset = offsetof($2, Field$3);$n$1.typ = $4;$"
"n$1.name = \"Field$3\";$n", 86);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_41, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n", 45);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_42, "$1.len = $2; $1.kind = 2;$n", 27);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_43, "$1.node = &$2;$n", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_44, "static N_NIMCALL(void, $1)(void* p, NI op)", 42);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_45, "$1 a;$n", 7);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_46, "a = ($1)p;$n", 12);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_47, "for ($1 = 0; $1 < $2; $1++) {$n", 31);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_48, "($1 \? $1->$2 : 0)", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_49, "$1.Sup", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_50, "#pragma pack(push, 1)$nstruct{", 30);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_51, "};$n", 4);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_52, "#pragma pack(pop)$n", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_53, "union{$n$1};$n", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_54, "$1 $2[SEQ_DECL_SIZE];$n", 23);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_55, "$1 $2:$3;$n", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_56, "switch ($1.$2) {$n", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_57, "case $1 ... $2:$n", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_58, "(-2147483647 -1)", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_59, "IL64($1)", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_60, "(IL64(-9223372036854775807) - IL64(1))", 38);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_61, "NIM_TRUE", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_62, "NIM_FALSE", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_63, "(($1) $2)", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_64, "static NIM_CONST $1 $2 = {NIM_NIL,NIM_NIL};$n", 45);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_65, "STRING_LITERAL($1, $2, $3);$n", 29);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_66, "static const struct {$n NI cap; void* allocator; NIM_CHAR data"
"[$2+1];$n} $1 = { $2, NIM_NIL, $3 };$n", 101);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_67, "static const NimStringV2 $1 = {$2, (NimStrPayload*)&$3};$n", 58);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_68, "case $1:$n", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_69, "default:$n", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_70, "break;$n", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_71, "} $n", 4);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_72, "$1.$2", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_73, "$1$3[$2]", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_74, "$1 {$n$2$3$4}\012", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_75, "$1;\012", 4);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_76, "N_NIMCALL_PTR(void, $1)(void*, NI);\012", 36);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_77, "\011$1 = (N_NIMCALL_PTR(void, )(void*, NI)) hcrRegisterProc($3, \"$"
"1\", (void*)$2);\012", 79);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_78, "$1.marker = $2;$n", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_79, "$1.len = $2; $1.kind = 0;$n$3.node = &$1;$n", 43);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_80, "$1.offset = $2;$n", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_81, "NI $1;$n", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_82, "static char* NIM_CONST $1[$2] = {$n$3};$n", 41);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_83, "for ($1 = 0; $1 < $2; $1++) {$n$3[$1+$4].kind = 1;$n$3[$1+$4].o"
"ffset = $1;$n$3[$1+$4].name = $5[$1];$n$6[$1] = &$3[$1+$4];$n}$n", 127);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_84, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n$4.node = &$1;$n", 61);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_85, "$1.flags = 1<<2;$n", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_86, "$1.destructor = (void*)$2; $1.size = sizeof($3); $1.name = $4;$"
"n", 64);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_87, "NimDT_$1_$2", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_88, "$1.kind = 3;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n"
"ame = $5;$n$1.sons = &$6[0];$n$1.len = $7;$n", 107);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_89, "TNimNode* $1[$2];$n", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_90, "$1.kind = 1;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n"
"ame = $5;$n", 74);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_91, "$1.deepcopy =(void* (N_RAW_NIMCALL*)(void*))$2;$n", 49);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_92, "Result", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_93, "$N#line $2 $1$N", 15);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_94, "struct {$1} GCFRAME_;$n", 23);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_95, "\011}BeforeRet_: ;$n", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_96, "}$N", 3);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_97, "\011$1 = ($3) hcrRegisterProc($4, \"$1\", (void*)$2);$n", 50);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_98, "$1(*)$2", 7);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_99, "static void* $1;$n", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_100, "\011$1 = ($2) ($3$4));$n", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_101, "$2 $1;$n", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_102, "\011$1 = ($2) hcrRegisterProc($3, \"$1\", (void*)$1);$n", 50);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_103, "\011$1 = ($2) hcrGetProc($3, \"$1\");$n", 34);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_104, " $1;$n", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_105, "\011$1 = ($2*)hcrGetGlobal($3, \"$1\");$n", 36);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_106, "NIM_CHECK_SIZE($1, $2);$n", 25);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_107, "typedef NI32 $1;$n", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_108, "typedef NU8 $1;$n", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_109, "typedef NU16 $1;$n", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_110, "typedef NI64 $1;$n", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_111, "typedef $1_PTR($2, $3) $4;$n", 28);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_112, "typedef struct {$nN_NIMCALL_PTR($2, ClP_0) $3;$nvoid* ClE_0;$n}"
" $1;$n", 69);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_113, "typedef $1 $2[1];$n", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_114, "typedef $1 $2[$3];$n", 20);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_115, " {$n", 4);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_116, "char dummy;$n", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_117, "TY", 2);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_118, "typedef $1 $2;$n", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_119, "$1 $2 {$n", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_120, "$1 Field$2;$n", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_121, "typedef NU$2 $1;$n", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_122, "typedef NU8 $1[$2];$n", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_123, "Field$1", 7);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_124, "NIM_CONST $1 $2 = $3;$n", 23);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_125, ",$n", 3);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_126, "{$1, ($2*)&$3}", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_127, "{{$1, $1 | NIM_STRLIT_FLAG}", 27);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_128, "(($1)&$2)", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_129, "{NIM_NIL,NIM_NIL}", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_130, "{(($1) $2),NIM_NIL}", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_131, "$1,$n", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_132, "$1", 2);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_133, "{{$1}}", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_134, "{$1}$n", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_135, "{$1, (NimStrPayload*)&$2}", 25);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_136, "extern NIM_CONST $1 $2;$n", 25);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_137, "goto NIMSTATE_$#;$n", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_138, "$2* $1;$n", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_139, "\011NimThreadVars* NimTV_;$n", 25);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_140, "static N_NIMCALL(void, $1)(void)", 32);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_141, "$1 {$n$2$3$4}$n", 15);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_142, "$1;$n", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_143, "//", 2);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_144, "$#;$n", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_145, "$#($#);$n", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_146, "$# = $#;$n", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_147, "NULL", 4);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_148, "((NU8)($1))", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_149, "($4*)(($1)+($2)), ($3)-($2)+1", 29);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_150, "($5*)($1)+(($2)-($4)), ($3)-($2)+1", 34);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_151, "($4*)($1)+($2), ($3)-($2)+1", 27);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_152, "($5*)(*$1)$4+($2), ($3)-($2)+1", 30);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_153, "($5*)$1$4+($2), ($3)-($2)+1", 27);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_154, "$1, $1Len_0", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_155, "(*$1)$3, $2", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_156, "$1$3, $2", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_157, "$1, $2", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_158, "$1.ClP_0($3$1.ClE_0);$n", 23);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_159, "$1.ClE_0\? $1.ClP_0($3$1.ClE_0):(($4)($1.ClP_0))($2);$n", 54);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_160, "$1.ClP_0($3$1.ClE_0)", 20);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_161, "$1.ClE_0\? $1.ClP_0($3$1.ClE_0):(($4)($1.ClP_0))($2)", 51);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_162, "(", 1);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_163, ")", 1);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_164, ";$n", 3);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_165, ");$n", 4);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_166, "[", 1);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_167, ": ", 2);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_168, "Result: ", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_169, "];$n", 4);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_170, "]", 1);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_171, "if ($1) goto $2;$n", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_172, "if (!($1)) goto $2;$n", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_173, "$1: ;$n", 7);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_174, "!($1)", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_175, "($3)((NU$2) ~($1))", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_176, "-($1)", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_177, "((NI$2)-($1))", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_178, "($1 > 0\? ($1) : -($1))", 22);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_179, "(($4)($1) + ($4)($2))", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_180, "(($4)($1) - ($4)($2))", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_181, "(($4)($1) * ($4)($2))", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_182, "(($4)($1) / ($4)($2))", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_183, "($4)((NU$5)($1) >> (NU$3)($2))", 30);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_184, "($4)((NU$3)($1) << (NU$3)($2))", 30);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_185, "($4)((NI$3)($1) >> (NU$3)($2))", 30);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_186, "($4)($1 & $2)", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_187, "($4)($1 | $2)", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_188, "($4)($1 ^ $2)", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_189, "(($1 <= $2) \? $1 : $2)", 22);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_190, "(($1 >= $2) \? $1 : $2)", 22);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_191, "($4)((NU$3)($1) + (NU$3)($2))", 29);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_192, "($4)((NU$3)($1) - (NU$3)($2))", 29);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_193, "($4)((NU$3)($1) * (NU$3)($2))", 29);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_194, "($4)((NU$3)($1) / (NU$3)($2))", 29);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_195, "($4)((NU$3)($1) % (NU$3)($2))", 29);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_196, "($1 == $2)", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_197, "($1 <= $2)", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_198, "($1 < $2)", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_199, "((NU$3)($1) <= (NU$3)($2))", 26);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_200, "((NU$3)($1) < (NU$3)($2))", 25);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_201, "((NU64)($1) <= (NU64)($2))", 26);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_202, "((NU64)($1) < (NU64)($2))", 25);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_203, "((NU8)($1) == (NU8)($2))", 24);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_204, "((NU8)($1) <= (NU8)($2))", 24);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_205, "((NU8)($1) < (NU8)($2))", 23);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_206, "($1 != $2)", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_207, "($1.ClP_0 == $2.ClP_0 && $1.ClE_0 == $2.ClE_0)", 46);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_208, "($1)($2 $3 $4)", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_209, "($#)($#)", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_210, ".Sup", 4);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_211, "$1.m_type == $2", 15);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_212, "static TNimType* $#[2];$n", 25);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_213, "sizeof($1)", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_214, "$1->finalizer = (void*)$2;$n", 28);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_215, "((NI)sizeof($1))", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_216, "((NI)alignof($1))", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_217, "((NI)offsetof($1, $2))", 22);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_218, "(*($1*) ($2))", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_219, "(($1) ($2))", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_220, "(($1) (ptrdiff_t) ($2))", 23);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_221, "(*($1*) (&$2))", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_222, "($1-1)", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_223, "$1 |= ((NU8)1)<<(($2) & 7);$n", 29);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_224, "($1- $2)", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_225, "$1 |= ((NU16)1)<<(($2) & 15);$n", 31);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_226, "$1 |= ((NU32)1)<<(($2) & 31);$n", 31);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_227, "$1 |= ((NU64)1)<<(($2) & 63);$n", 31);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_228, "$1 &= ~(((NU8)1) << (($2) & 7));$n", 34);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_229, "$1 &= ~(((NU16)1) << (($2) & 15));$n", 36);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_230, "$1 &= ~(((NU32)1) << (($2) & 31));$n", 36);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_231, "$1 &= ~(((NU64)1) << (($2) & 63));$n", 36);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_232, "$1 >= $2 && $1 <= $3", 20);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_233, "$1 == $2", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_234, "(($1 &((NU8)1<<((NU)($2)&7U)))!=0)", 34);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_235, "(($1 &((NU16)1<<((NU)($2)&15U)))!=0)", 36);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_236, "(($1 &((NU32)1<<((NU)($2)&31U)))!=0)", 36);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_237, "(($1 &((NU64)1<<((NU)($2)&63U)))!=0)", 36);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_238, "(($1[(NU)($2)>>3] &(1U<<((NU)($2)&7U)))!=0)", 43);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_239, "$1[(NU)($2)>>3] |=(1U<<($2&7U));$n", 34);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_240, "$1[(NU)($2)>>3] &= ~(1U<<($2&7U));$n", 36);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_241, "for ($1 = 0; $1 < $2; $1++) $n $3[$1] = $4[$1] $6 $5[$1];$n", 60);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_242, "static NIM_CONST $1 $2 = $3;$n", 30);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_243, "for ($1 = $3; $1 <= $4; $1++) $n$2[(NU)($1)>>3] |=(1U<<((NU)($1"
")&7U));$n", 72);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_244, "$1[(NU)($2)>>3] |=(1U<<((NU)($2)&7U));$n", 40);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_245, "$1 = 0;$n", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_246, "for ($1 = $3; $1 <= $4; $1++) $n$2 |=(($5)(1)<<(($1)%(sizeof($5"
")*8)));$n", 72);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_247, "$1 |=(($3)(1)<<(($2)%(sizeof($3)*8)));$n", 40);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_248, "$1.Field$2", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_249, "LOC$1.source", 12);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_250, "LOC$#.dest", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_251, ".Field$1", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_252, ".$1", 3);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_253, "TFrame $1;$n", 12);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_254, "if (!$1) goto $2;$n", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_255, "goto $1;$n", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_256, "TMP$1_", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_257, "static void* $#[$#] = {", 23);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_258, "&&TMP$#_, ", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_259, "&&TMP$#_};$n", 12);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_260, "goto *$#[$#];$n", 15);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_261, "TMP$#_:$n", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_262, "case $1: $n$2break;$n", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_263, "goto LA$1_;$n", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_264, "LA$1_: ;$n", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_265, "NIMSTATE_$#:$n", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_266, "switch ($1) {$n", 15);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_267, "default: __assume(0);$n", 23);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_268, "goto BeforeRet_;$n", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_269, "throw;$n", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_270, "else", 4);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_271, "throw $1;$n", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_272, "$n#pragma omp $4$nfor ($1 = $2; $1 <= $3; ++$1)", 47);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_273, "$n#pragma omp $5$nfor ($1 = $2; $1 <= $3; $1 += $4)", 51);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_274, "case -1:$n", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_275, " goto BeforeRet_;$n", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_276, "case $2: goto $1$2;$n", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_277, "(((NI*) $1)[1] < 0)", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_278, "((((NI*) $1.ClE_0)[1]) < 0)", 27);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_279, "$1 N_NIMCALL(void, $2)(void) {$N", 32);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_280, "\011int* nim_hcr_dummy_ = 0;$n\011NIM_BOOL nim_hcr_do_init_ = hcrRegi"
"sterGlobal($1, \"module_initialized_\", 1, NULL, (void**)&nim_hcr_"
"dummy_);$n", 137);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_281, "{$N", 3);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_282, "\011TFrame FR_; FR_.len = 0;$N", 27);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_283, "}$N$N", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_284, "N_LIB_EXPORT N_NIMCALL(void, $1)(void* handle, N_NIMCALL_PTR(vo"
"id*, getProcAddr)(void*, char*)) {$N", 99);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_285, "static $2 $1;$n", 15);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_286, "\011$1 = ($2) $3($4, $5);$n", 24);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_287, "NIM_EXTERNC N_NIMCALL(void, nimLoadProcs$1)(void) {$2}$N$N", 58);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_288, "N_LIB_EXPORT N_NIMCALL(void, HcrCreateTypeInfos)(void) {$N", 58);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_289, "$nN_LIB_PRIVATE const char* hcr_module_list[] = {$n", 51);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_290, "\011$1,$n", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_291, "\011\"\"};$n", 7);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_292, "$nN_LIB_EXPORT N_NIMCALL(void**, HcrGetImportedModules)() { ret"
"urn (void**)hcr_module_list; }$n", 95);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_293, "$nN_LIB_EXPORT N_NIMCALL(char*, HcrGetSigHash)() { return \"$1\";"
" }$n$n", 69);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_294, "static void* hcr_handle;$N", 26);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_295, "N_LIB_EXPORT N_NIMCALL(void, $1)(void);$N", 41);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_296, "N_LIB_EXPORT N_NIMCALL(void, $1)(void*, N_NIMCALL_PTR(void*, ge"
"tProcAddr)(void*, char*));$N", 91);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_297, "N_LIB_EXPORT N_NIMCALL(void, HcrCreateTypeInfos)(void);$N", 57);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_298, "\011$1();$N", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_299, "\011hcrInit((void**)hcr_module_list, $1, $2, $3, hcr_handle, nimGe"
"tProcAddr);$n", 76);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_300, "\011$1(hcr_handle, nimGetProcAddr);$N", 34);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_301, "\011hcrAddModule($1);\012", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_302, "\011HcrCreateTypeInfos();$N", 24);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_303, "\011hcrRegisterGlobal($1, \"cmdCount\", sizeof(cmd_count), NULL, (vo"
"id**)&cmd_count);$N", 82);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_304, "\011hcrRegisterGlobal($1, \"cmdLine\", sizeof(cmd_line), NULL, (void"
"**)&cmd_line);$N", 79);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_305, "N_LIB_PRIVATE N_NIMCALL(void, $1)(void);$N", 42);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_306, "$#NI NimThreadVarsSize(){return (NI)sizeof(NimThreadVars);}$n", 61);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_307, "/* Generated by Nim Compiler v$1 */$N/* (c) 2019 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N", 131);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_308, "/* Generated by Nim Compiler v$1 */$N/* (c) 2019 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N/* Compiled for: $2, $3, $4 */$N/* Command for C compiler:$n"
" $5 */$N", 201);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_309, "#define NIM_INTBITS $1\012", 23);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_310, "typedef struct {$1} NimThreadVars;$n", 36);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_311, "#include \"$1\"$N", 15);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_312, "#include $1$N", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_313, "--file:r\"$1\"$N", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_314, "\012[Symbols]$n$1", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_315, "/* Generated by Nim Compiler v$1 */$N/* (c) 2017 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N", 131);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_316, "__$1__", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_317, "#ifndef $1$n#define $1$n", 24);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_318, "N_CDECL(void, NimMain)(void);$n", 31);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_319, "#endif /* $1 */$n", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_320, "var F={procname:$1,prev:framePtr,filename:$2,line:0};$n", 55);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_321, "framePtr = F;$n", 15);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_322, "var $1;$n", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_323, "if ($1 == undefined) {$n", 24);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_324, "if ($1 === undefined) {$n", 25);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_325, "var $1 = null;$n", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_326, "var $1_Idx = 0;$n", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_327, "[$1]", 4);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_328, "new $1($2)", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_329, "var $# = null;$n", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_330, "var $#_Idx = 0;$n", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_331, "var $# = $#;$n", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_332, "return [$#, $#];$n", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_333, "return $#;$n", 12);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_334, "BeforeRet: do {$n", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_335, "} while (false);$n", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_336, "try {$n$1} catch (e) {$n alert(\"Unhandled exception:\\n\" + e.mes"
"sage + \"\\n\"$n}", 77);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_337, "function $#() { return $#.apply(this, arguments); }$n", 53);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_338, "function $#($#) {$n$#$#$#$#$#", 29);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_339, "arrayConstr($1, $2, $3)", 23);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_340, "NTI$1", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_341, "var $1 = {size: 0,kind: $2,base: null,node: null,finalizer: nul"
"l};$n", 68);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_342, "$1.base = $2;$n", 15);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_343, "\"$1\": {kind: 1, offset: $1, typ: $2, name: $3, len: 0, sons: nu"
"ll}", 66);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_344, "var NNI$1 = {kind: 2, offset: 0, typ: null, name: null, len: $2"
", sons: {$3}};$n", 79);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_345, "var $1 = {size: 0, kind: $2, base: null, node: null, finalizer:"
" null};$n", 72);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_346, "$1.node = NNI$2;$n", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_347, "var NNI$1 = $2;$n", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_348, "{kind: 2, len: $1, offset: 0, typ: null, name: null, sons: [$2]"
"}", 64);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_349, "{kind: 1, offset: \"$1\", len: 0, typ: $2, name: $3, sons: null}", 62);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_350, "[$1, $2]", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_351, "[setConstr($1), $2]", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_352, "{kind: 3, offset: \"$1\", len: $3, typ: $2, name: $4, sons: [$5]}", 63);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_353, "{kind: 1, offset: \"Field$1\", len: 0, typ: $2, name: \"Field$1\", "
"sons: null}", 74);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_354, "Field$1: $2", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_355, "m_type: $1", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_356, "$#: ", 4);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_357, "({$1})", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_358, "nimCopy(null, $1, $2)", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_359, "Tmp$1", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_360, "var $1 = $2, $3 = $1[0], $3_Idx = $1[1];$n", 42);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_361, "$1 = nimCopy(null, $1, $2);$n", 29);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_362, "$1[0][0]", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_363, "$1[0][1]", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_364, "$1[0]", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_365, "$1[1]", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_366, "makeNimstrLit($1)", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_367, "// line $2 \"$1\"$n", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_368, "F.line = $1;$n", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_369, "($1 || $2)", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_370, "if ($1) $2 = true; else {", 25);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_371, "$2 = $1;", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_372, "($1 && $2)", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_373, "if (!$1) $2 = false; else {", 27);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_374, "$1[0][$1[1]]", 12);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_375, "($1 = $2, $1)", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_376, "$1 = (($5 $2 $3) $4)", 20);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_377, "(($1 $2 $3) $4)", 15);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_378, "addInt($1, $2)", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_379, "($1 + $2)", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_380, "subInt($1, $2)", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_381, "($1 - $2)", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_382, "mulInt($1, $2)", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_383, "($1 * $2)", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_384, "divInt($1, $2)", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_385, "Math.trunc($1 / $2)", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_386, "modInt($1, $2)", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_387, "Math.trunc($1 % $2)", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_388, "($1 / $2)", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_389, "($1 << $2)", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_390, "($1 >> $2)", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_391, "($1 & $2)", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_392, "($1 | $2)", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_393, "($1 ^ $2)", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_394, "nimMin($1, $2)", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_395, "nimMax($1, $2)", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_396, "($1 % $2)", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_397, "negInt($1)", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_398, "negInt64($1)", 12);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_399, "absInt($1)", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_400, "Math.abs($1)", 12);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_401, "+($1)", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_402, "~($1)", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_403, "nimCharToStr($1)", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_404, "nimBoolToStr($1)", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_405, "cstrToNimstr(($1)+\"\")", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_406, "cstrToNimstr($1)", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_407, "(($1 $2) >>> $3)", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_408, "($# == $# && $# == $#)", 22);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_409, "var $1 = $2; $2 = $3; $3 = $1;$n", 32);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_410, "var $1 = $2; $2 = $3; $3 = $1;", 30);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_411, "$1 - 1", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_412, "subInt($1, 1)", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_413, "if ($1 != null) { addChar($3, $2); } else { $3 = [$2]; }", 56);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_414, "if ($1 != null) { $4 += $2; } else { $4 = $2$3; }", 49);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_415, ".slice()", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_416, "if ($1 != null) { $4 = ($4).concat($2); } else { $4 = $2$3; }", 61);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_417, "if ($1 != null) { $3.push($2); } else { $3 = [$2]; }", 52);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_418, "var $1 = nimCopy(null, $2, $3);$n", 33);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_419, "[$1].concat(", 12);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_420, "($1 || []).concat(", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_421, "[$1],", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_422, "$1 || [],", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_423, "[$1])", 5);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_424, "$1 || [])", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_425, "eqStrings($1, $2)", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_426, "(cmpStrings($1, $2) <= 0)", 25);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_427, "(cmpStrings($1, $2) < 0)", 24);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_428, "($1 == null)", 12);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_429, "($# == null && $# === 0)", 24);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_430, "$1 = $2;$n", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_431, "$1 = [$3]; $2 = 0;$n", 20);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_432, "$1 = [[$2], 0];$n", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_433, "($1 \? 1:0)", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_434, "($1 != null \? $2.length : 0)", 28);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_435, "$1.length", 9);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_436, "($1 != null \? ($2.length-1) : -1)", 33);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_437, "$1 += $2", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_438, "$1 = addInt($3, $2)", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_439, "$1 -= $2", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_440, "$1 = subInt($3, $2)", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_441, "($1 == null \? $3 = mnewString($2) : $3.length = $2)", 51);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_442, "if ($1 === null) $4 = [];\012 if ($4.length < $2) { "
"for (var i=$4.length;i<$5;++i) $4.push($3); }\012 els"
"e { $4.length = $5; }", 148);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_443, "SetCard($1)", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_444, "SetLt($1, $2)", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_445, "SetLe($1, $2)", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_446, "SetEq($1, $2)", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_447, "SetMul($1, $2)", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_448, "SetPlus($1, $2)", 15);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_449, "SetMinus($1, $2)", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_450, "$1[$2] = true", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_451, "delete $1[$2]", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_452, "($1[$2] != undefined)", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_453, "$1 = new Array($2); for (var i=0;i<$2;++i) {$1[i]=$3;}", 54);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_454, "[]", 2);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_455, "($1.m_type == $2)", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_456, "isObj($1.m_type, $2)", 20);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_457, "$1 = null, $2 = 0;$n", 20);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_458, "$1 = genericReset($3, $2);$n", 28);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_459, "($1.slice($2))", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_460, "mnewString($1)", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_461, "mnewString(0)", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_462, "($1 = $2, $1[0]), $1[1]", 23);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_463, "($1 = $2, $1)[0]", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_464, "($1.slice($2, $3+1))", 20);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_465, "var $1 = $2;$n", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_466, "Field$#: [$#, $#]", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_467, "Field$#: $#", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_468, "$#: [$#, $#]", 12);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_469, "$#: $#", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_470, "{$1}", 4);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_471, "(!!($1))", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_472, "(($1)|0)", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_473, "if ($1[$2.$3]$4undefined) { raiseFieldError(makeNimstrLit($5));"
" }$n", 67);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_474, "!==", 3);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_475, "===", 3);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_476, "chckIndx($1, $2, ($3 != null \? $3.length : 0)+$2-1)-$2", 54);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_477, "($1)-$2", 7);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_478, "$1.charCodeAt($2)", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_479, "($1 $2)", 7);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_480, "($1|0)", 6);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_481, "($1 - ($2 $3))", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_482, "null", 4);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_483, "chckRange($1, $2, $3)", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_484, "toJSStr($1)", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_485, "L$1: do {$n", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_486, "} while(false);$n", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_487, "else {$n", 8);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_488, "if ($1) {$n", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_489, "L$1: while (true) {$n", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_490, "if (!$1) break L$2;$n", 21);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_491, "switch (toJSStr($1)) {$n", 24);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_492, "default: $n", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_493, "break BeforeRet;$n", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_494, "break L$1;$n", 12);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_495, "$1 = nimCopy(null, $2, $3);$n", 29);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_496, "nimCopy($1, $2, $3);$n", 22);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_497, "var $1 = $4; $2 = $1[0]; $3 = $1[1];$n", 38);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_498, "$# = [$#, $#];$n", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_499, "$1 = $2; $3 = $4;$n", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_500, "try {$n", 7);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_501, "--excHandler;$n} catch (EXC) {$n var prevJSError = lastJSError;"
"$n lastJSError = EXC;$n --excHandler;$n", 102);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_502, "framePtr = $1;$n", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_503, "lastJSError instanceof $1", 25);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_504, "isObj(lastJSError.m_type, $1)", 29);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_505, "if (lastJSError && ($1)) {$n", 28);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_506, "var $1 = lastJSError;$n", 23);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_507, "lastJSError = prevJSError;$n", 28);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_508, "raiseException($1, $2);$n", 25);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_509, "$1 = true;$n", 12);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_510, "/* Generated by the Nim Compiler v$1 */$n/* (c) 2019 Andreas "
"Rumpf */$n$nvar framePtr = null;$nvar excHandler = 0;$nvar lastJ"
"SError = null;$nif (typeof Int8Array === \'undefined\') Int8Array "
"= Array;$nif (typeof Int16Array === \'undefined\') Int16Array = Ar"
"ray;$nif (typeof Int32Array === \'undefined\') Int32Array = Array;"
"$nif (typeof Uint8Array === \'undefined\') Uint8Array = Array;$nif"
" (typeof Uint16Array === \'undefined\') Uint16Array = Array;$nif ("
"typeof Uint32Array === \'undefined\') Uint32Array = Array;$nif (ty"
"peof Float32Array === \'undefined\') Float32Array = Array;$nif (ty"
"peof Float64Array === \'undefined\') Float64Array = Array;$n", 633);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_511, "Deprecated", 10);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_512, "Deprecated:", 11);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_513, "\012<p><strong class=\"examples_text\">$1</strong></p>\012", 50);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_514, "\012\\textbf{$1}\012", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_515, "<span class=\"Comment\">$1</span>", 31);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_516, "\\spanComment{$1}", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_517, "<span class=\"Keyword\">$1</span>", 31);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_518, "\\spanKeyword{$1}", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_519, "<span class=\"Operator\">$1</span>", 32);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_520, "\\spanOperator{$1}", 17);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_521, "<span class=\"StringLit\">$1</span>", 33);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_522, "\\spanStringLit{$1}", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_523, "<span class=\"CharLit\">$1</span>", 31);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_524, "\\spanCharLit{$1}", 16);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_525, "<span class=\"DecNumber\">$1</span>", 33);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_526, "\\spanDecNumber{$1}", 18);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_527, "<span class=\"FloatNumber\">$1</span>", 35);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_528, "\\spanFloatNumber{$1}", 20);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_529, "<a href=\"#$2\"><span class=\"Identifier\">$1</span></a>", 52);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_530, "\\spanIdentifier{$1}", 19);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_531, "<a href=\"$1#$2\"><span class=\"Identifier\">$3</span></a>", 54);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_532, "<span class=\"Identifier\">$1</span>", 34);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_533, "<span><span class=\"Other\">{</span><span class=\"Other pragmadots"
"\">...</span><span class=\"Other\">}</span></span><span class=\"prag"
"mawrap\"><span class=\"Other\">$1</span><span class=\"pragma\">", 185);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_534, "\\spanOther{$1}", 14);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_535, "</span><span class=\"Other\">$1</span></span>", 43);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_536, "<span class=\"Other\">$1</span>", 29);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_537, "<a class=\"reference external\" href=\"$2\">$1</a>", 46);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_538, "<a href=\"$2#$1\"><span class=\"Identifier\">$1</span></a>", 54);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_539, "$1 -> \"$2\";$n", 13);
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_540, "digraph $1 {$n$2}$n", 19);
/* GC marker for heap-allocated rope objects: reports the three traced
 * fields (left/right child ropes and the leaf string) to the collector
 * via nimGCvisit so they are kept alive / followed during a GC pass. */
static N_NIMCALL(void, Marker_tyRef__4hi0XQqK9aLiPuWT9acsXm9aQ)(void* p, NI op) {
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a;
a = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)p;
nimGCvisit((void*)(*a).left, op);
nimGCvisit((void*)(*a).right, op);
nimGCvisit((void*)(*a).data, op);
}
/* GC marker for the module-level rope cache: visits every one of the
 * 4096 cache slots so cached ropes are not collected. */
static N_NIMCALL(void, TM__Vw9cfUOQOae9b9bzZBlucMZQg_3)(void) {
NI slot;
slot = (NI)0;
for (slot = 0; slot < 4096; slot++) {
nimGCvisit((void*)cache__WGMp5Wo1NlgbAMOysPIfmQ[slot], 0);
}
}
/* Length of a rope: 0 for the nil rope, otherwise |a->L|.
 * (Leaf ropes created by newRope store their length negated in L,
 * interior nodes store it positive, so the absolute value is taken.) */
N_LIB_PRIVATE N_NIMCALL(NI, len__9b0YRltzV3kNSE9aQTsG82wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a) {
NI result;
if (a == NIM_NIL) {
result = ((NI) 0);
}
else {
result = ((*a).L > 0? ((*a).L) : -((*a).L));
}
return result;
}
/* Increment a cell's reference count. Counts are stepped in units of 8 —
 * presumably the low three bits of refcount carry GC flags (TODO: confirm
 * against the Nim GC sources). The arithmetic is done in NU64 to get
 * well-defined wraparound instead of signed overflow. */
static N_INLINE(void, incRef__AT1eRuflKWyTTBdLjEDZbg_3system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) { (*c).refcount = (NI)((NU64)((*c).refcount) + (NU64)(((NI) 8)));
}
/* Map a user-visible object pointer back to its GC cell header,
 * which sits 16 bytes before the user data. */
static N_INLINE(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*, usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem)(void* usr) {
NI cellAddr;
cellAddr = (NI)((NU64)(((NI) (ptrdiff_t) (usr))) - (NU64)(((NI) 16)));
return ((tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*) cellAddr);
}
/* Register a cell whose refcount dropped to zero on the global GC's
 * zero-count table (ZCT); the collector frees ZCT entries later. */
static N_INLINE(void, rtlAddZCT__AT1eRuflKWyTTBdLjEDZbg_2system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) { addZCT__Y66tOYFjgwJ0k4aLz4bc0Q((&gch__IcYaEuuWivYAS86vFMTS3Q.zct), c);
}
/* Decrement a cell's reference count (stepped in units of 8, unsigned
 * arithmetic). If the count falls below one full reference, the cell is
 * put on the zero-count table for later collection. */
static N_INLINE(void, decRef__AT1eRuflKWyTTBdLjEDZbgsystem)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) {
(*c).refcount = (NI)((NU64)((*c).refcount) - (NU64)(((NI) 8)));
if ((NU64)((*c).refcount) < (NU64)(((NI) 8))) {
rtlAddZCT__AT1eRuflKWyTTBdLjEDZbg_2system(c);
}
}
/* Reference-counted pointer assignment (write barrier):
 * retain the new referent first, then release the old one, then store.
 * The retain-before-release order is what makes self-assignment safe. */
static N_INLINE(void, asgnRef)(void** dest, void* src) {
if (!(src == NIM_NIL)) {
incRef__AT1eRuflKWyTTBdLjEDZbg_3system(usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem(src));
}
if (!((*dest) == NIM_NIL)) {
decRef__AT1eRuflKWyTTBdLjEDZbgsystem(usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem((*dest)));
}
(*dest) = src;
}
/* Pop the last element from a seq of rope pointers.
 * NOTE: the element is read BEFORE the seq is shrunk via setLengthSeqV2;
 * that ordering is load-bearing — do not reorder.
 * Assumes the seq is non-empty (no bounds check on L). */
static N_INLINE(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem)(tySequence__WwUFq9cJ2xKRlsAWVEHyPRg** s) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
NI L;
NI T1_;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
T1_ = ((*s) ? (*s)->Sup.len : 0);
L = (NI)(T1_ - ((NI) 1));
result = (*s)->data[L];
unsureAsgnRef((void**) (&(*s)), (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) setLengthSeqV2(&((*s))->Sup, (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), ((NI) (L))));
return result;
}
/* Thin wrapper over memcpy; source and dest must not overlap. */
static N_INLINE(void, nimCopyMem)(void* dest, void* source, NI size) {
memcpy(dest, source, ((size_t) (size)));
}
/* Nim's system.copyMem compiled to C: forwards to nimCopyMem (memcpy). */
static N_INLINE(void, copyMem__i80o3k0SgEI5gTRCzYdyWAsystem)(void* dest, void* source, NI size) { nimCopyMem(dest, source, size);
}
/* Append src to dest, copying src->len + 1 bytes so the terminating NUL
 * comes along; dest's length is then advanced by src->len. The caller is
 * responsible for having reserved capacity. A nil src is a no-op. */
static N_INLINE(void, appendString)(NimStringDesc* dest, NimStringDesc* src) {
if (!(src == NIM_NIL)) {
copyMem__i80o3k0SgEI5gTRCzYdyWAsystem(((void*) ((&(*dest).data[(*dest).Sup.len]))), ((void*) ((*src).data)), ((NI) ((NI)((*src).Sup.len + ((NI) 1)))));
(*dest).Sup.len += (*src).Sup.len;
}
}
/* `$rope`: flatten a rope tree into a single Nim string.
 * Pre-sizes the result via mnewString(len(r)), then resets the logical
 * length to 0 and appends each leaf in order.
 * Traversal is iterative: an explicit stack of pending subtrees replaces
 * recursion — for each popped node, walk down the left spine pushing
 * right children, then append the leaf's data. A nil rope yields "". */
N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, dollar___mZ66tEveFIQokq3arf8Klw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r) { NimStringDesc* result;
NI T1_;
result = (NimStringDesc*)0;
T1_ = (NI)0;
T1_ = len__9b0YRltzV3kNSE9aQTsG82wg(r);
result = mnewString(((NI) (T1_)));
result = setLengthStr(result, ((NI) 0));
{
NimStringDesc* s;
s = (NimStringDesc*)0;
{
tySequence__WwUFq9cJ2xKRlsAWVEHyPRg* stack;
if (!!((r == NIM_NIL))) goto LA5_;
stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) newSeq((&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), 1);
asgnRef((void**) (&stack->data[0]), r);
{
/* Outer loop: process pending subtrees until the stack is empty. */
while (1) {
NI T9_;
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* it;
T9_ = (stack ? stack->Sup.len : 0);
if (!(((NI) 0) < T9_)) goto LA8;
it = pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem((&stack));
{
/* Inner loop: descend the left spine, deferring right children. */
while (1) {
NI T12_;
if (!!(((*it).left == NIM_NIL))) goto LA11;
stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) incrSeqV3((TGenericSeq*)(stack), (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_));
T12_ = stack->Sup.len++;
asgnRef((void**) (&stack->data[T12_]), (*it).right);
it = (*it).left;
} LA11: ;
}
/* `it` is now a leaf; append its string data. */
s = (*it).data;
result = resizeString(result, (s ? s->Sup.len : 0) + 0);
appendString(result, s);
} LA8: ;
}
}
LA5_: ;
}
return result;
}
/* Release one reference to an object known not to participate in
 * reference cycles: translate the user pointer to its cell and decRef. */
static N_INLINE(void, nimGCunrefNoCycle)(void* p) {
decRef__AT1eRuflKWyTTBdLjEDZbgsystem(usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem(p));
}
/* Allocate a leaf rope holding `data`. The length is stored NEGATED in L
 * (interior nodes built by `&` store it positive — len() takes |L|).
 * The data field is set with copyStringRC1 and the previous value (always
 * nil for a fresh object, but the generated pattern is generic) is
 * unreferenced afterwards; keep that swap-then-unref order. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, newRope__dBdikNFB2Y7QJ9aVJE7dGHg)(NimStringDesc* data) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
NimStringDesc* T1_;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*) newObj((&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_), sizeof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA));
(*result).Sup.m_type = (&NTI__OFzf0kSiPTcNreUIeJgWVA_);
(*result).L = ((NI64)-((data ? data->Sup.len : 0)));
T1_ = (NimStringDesc*)0;
T1_ = (*result).data; (*result).data = copyStringRC1(data);
if (T1_) nimGCunrefNoCycle(T1_);
return result;
}
/* Rope concatenation `a & b`. Nil is the identity element on either side.
 * Otherwise an interior node is created whose L is the sum of both
 * operands' absolute lengths and whose left/right children are a and b
 * (stored through the refcounting write barrier). */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___ShdZ6VrAQkY0nWR9a39b9bGdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) {
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
if (a == NIM_NIL) {
result = b;
}
else if (b == NIM_NIL) {
result = a;
}
else {
result = newRope__dBdikNFB2Y7QJ9aVJE7dGHg(((NimStringDesc*) NIM_NIL));
(*result).L = (NI)(((*a).L > 0? ((*a).L) : -((*a).L)) + ((*b).L > 0? ((*b).L) : -((*b).L)));
asgnRef((void**) (&(*result).left), a);
asgnRef((void**) (&(*result).right), b);
}
return result;
}
/* Thin wrapper over memcmp. */
static N_INLINE(int, nimCmpMem)(void* a, void* b, NI size) {
return memcmp(a, b, ((size_t) (size)));
}
/* Byte-wise equality of two memory regions of the same size. */
static N_INLINE(NIM_BOOL, equalMem__9bGgqEk7RXXl6eqM9c1HdELAsystem)(void* a, void* b, NI size) {
int cmp;
cmp = nimCmpMem(a, b, size);
return (cmp == ((NI32) 0));
}
/* Nim string equality: nil counts as length 0. Two strings are equal iff
 * their lengths match and (for non-empty strings) their bytes match. */
static N_INLINE(NIM_BOOL, eqStrings)(NimStringDesc* a, NimStringDesc* b) {
NI alen;
NI blen;
alen = (a ? a->Sup.len : 0);
blen = (b ? b->Sup.len : 0);
if (!(alen == blen)) {
return (NIM_BOOL)0;
}
if (alen == ((NI) 0)) {
return NIM_TRUE;
}
return equalMem__9bGgqEk7RXXl6eqM9c1HdELAsystem(((void*) ((&a->data[((NI) 0)]))), ((void*) ((&b->data[((NI) 0)]))), ((NI) (alen)));
}
/* Intern a string as a leaf rope via a 4096-slot direct-mapped cache.
 * The slot is hash(s) & 4095; on a miss (empty slot or different string)
 * a new rope is created and overwrites the slot via the RC write barrier.
 * gCacheTries / gCacheMisses are hit-rate statistics counters. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, insertInCache__yShmEg9cffWxI7s5XzEKBow_2)(NimStringDesc* s) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
NI h;
NI T1_;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
gCacheTries__5GfZTThHPBfB9bjRZdFluBw += ((NI) 1);
T1_ = (NI)0;
T1_ = hash__6PCYkKlCNhq9cnRLnqWKkwQ(s);
h = (NI)(T1_ & ((NI) 4095));
result = cache__WGMp5Wo1NlgbAMOysPIfmQ[(h)- 0];
{
NIM_BOOL T4_;
T4_ = (NIM_BOOL)0;
/* Miss when the slot is empty OR holds a different string
 * (short-circuit: eqStrings is only called for a non-empty slot). */
T4_ = (result == 0);
if (T4_) goto LA5_;
T4_ = !(eqStrings((*result).data, s));
LA5_: ;
if (!T4_) goto LA6_;
gCacheMisses__fLRm9am8S0daYBVNK6JKyBg += ((NI) 1);
result = newRope__dBdikNFB2Y7QJ9aVJE7dGHg(s);
asgnRef((void**) (&cache__WGMp5Wo1NlgbAMOysPIfmQ[(h)- 0]), result);
}
LA6_: ;
return result;
}
/* Convert a Nim string to a rope. The empty (or nil) string maps to the
 * nil rope; everything else goes through the interning cache. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, rope__yShmEg9cffWxI7s5XzEKBow)(NimStringDesc* s) {
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
if ((s ? s->Sup.len : 0) == ((NI) 0)) {
result = NIM_NIL;
}
else {
result = insertInCache__yShmEg9cffWxI7s5XzEKBow_2(s);
}
return result;
}
/* `rope & string`: convert the string operand to a rope, then concatenate. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___Z7W1o5nPSc3ExfO5f7j1Gg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, NimStringDesc* b) {
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* bRope;
bRope = rope__yShmEg9cffWxI7s5XzEKBow(b);
return amp___ShdZ6VrAQkY0nWR9a39b9bGdQ(a, bRope);
}
/* In-place `a.add(string)`: a = a & rope(b). */
N_LIB_PRIVATE N_NIMCALL(void, add__yG4AKzsBRS1W4MANDlXQeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, NimStringDesc* b) { unsureAsgnRef((void**) (&(*a)), amp___Z7W1o5nPSc3ExfO5f7j1Gg((*a), b));
}
/* In-place `a.add(rope)`: a = a & b. */
N_LIB_PRIVATE N_NIMCALL(void, add__IM4kcMNkkOLJtqdEqSxR8A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) { unsureAsgnRef((void**) (&(*a)), amp___ShdZ6VrAQkY0nWR9a39b9bGdQ((*a), b));
}
/* Format `frmt` into a rope, substituting `$`-directives from `args`:
 *   $$        literal '$'
 *   $#        next argument (auto-incrementing counter `num`)
 *   $1..$NNN  argument by 1-based decimal index
 *   ${NNN}    argument by 1-based decimal index, brace-delimited
 *   $n / $N   newline literal
 * Out-of-range indices, an unterminated '{', and any other character
 * after '$' trigger failedAssertImpl with a message built from `frmt`.
 * Literal runs between directives are appended via substr.
 * NOTE: the index stepping of `i` is intricate and order-dependent;
 * code left byte-identical, comments only. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA)(NimStringDesc* frmt, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
NI i;
NI length;
NI num;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
i = ((NI) 0);
length = (frmt ? frmt->Sup.len : 0);
result = NIM_NIL;
num = ((NI) 0);
{
while (1) {
NI start;
if (!(i < length)) goto LA2;
{
/* 36 == '$': dispatch on the character following the dollar sign. */
if (!((NU8)(frmt->data[i]) == (NU8)(36))) goto LA5_;
i += ((NI) 1);
switch (((NU8)(frmt->data[i]))) {
case 36:
{
/* "$$" -> literal '$'. */
add__yG4AKzsBRS1W4MANDlXQeg(&result, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_4));
i += ((NI) 1);
}
break;
case 35:
{
/* "$#" -> next sequential argument. */
i += ((NI) 1);
add__IM4kcMNkkOLJtqdEqSxR8A(&result, args[num]);
num += ((NI) 1);
}
break;
case 48 ... 57:
{
/* "$NNN" -> parse the decimal index into j. */
NI j;
j = ((NI) 0);
{
while (1) {
j = (NI)((NI)((NI)(j * ((NI) 10)) + ((NU8)(frmt->data[i]))) - ((NI) 48));
i += ((NI) 1);
{
NIM_BOOL T14_;
T14_ = (NIM_BOOL)0;
T14_ = ((frmt ? frmt->Sup.len : 0) <= i);
if (T14_) goto LA15_;
T14_ = !((((NU8)(frmt->data[i])) >= ((NU8)(48)) && ((NU8)(frmt->data[i])) <= ((NU8)(57))));
LA15_: ;
if (!T14_) goto LA16_;
goto LA10;
}
LA16_: ;
}
} LA10: ;
num = j;
{
/* Index out of range -> assertion failure with the format string. */
if (!((NI)((argsLen_0-1) + ((NI) 1)) < j)) goto LA20_;
{
NimStringDesc* T26_;
if (!NIM_TRUE) goto LA24_;
T26_ = (NimStringDesc*)0;
T26_ = rawNewString((frmt ? frmt->Sup.len : 0) + 50);
appendString(T26_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_5));
appendString(T26_, frmt);
failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T26_);
}
LA24_: ;
}
goto LA18_;
LA20_: ;
{
add__IM4kcMNkkOLJtqdEqSxR8A(&result, args[(NI)(j - ((NI) 1))]);
}
LA18_: ;
}
break;
case 123:
{
/* "${NNN}" -> brace-delimited decimal index. */
NI j_2;
i += ((NI) 1);
j_2 = ((NI) 0);
{
while (1) {
if (!(((NU8)(frmt->data[i])) >= ((NU8)(48)) && ((NU8)(frmt->data[i])) <= ((NU8)(57)))) goto LA30;
j_2 = (NI)((NI)((NI)(j_2 * ((NI) 10)) + ((NU8)(frmt->data[i]))) - ((NI) 48));
i += ((NI) 1);
} LA30: ;
}
num = j_2;
{
/* 125 == '}': the directive must be closed, else assertion failure. */
if (!((NU8)(frmt->data[i]) == (NU8)(125))) goto LA33_;
i += ((NI) 1);
}
goto LA31_;
LA33_: ;
{
{
NimStringDesc* T40_;
if (!NIM_TRUE) goto LA38_;
T40_ = (NimStringDesc*)0;
T40_ = rawNewString((frmt ? frmt->Sup.len : 0) + 50);
appendString(T40_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_6));
appendString(T40_, frmt);
failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T40_);
}
LA38_: ;
}
LA31_: ;
{
if (!((NI)((argsLen_0-1) + ((NI) 1)) < j_2)) goto LA43_;
{
NimStringDesc* T49_;
if (!NIM_TRUE) goto LA47_;
T49_ = (NimStringDesc*)0;
T49_ = rawNewString((frmt ? frmt->Sup.len : 0) + 50);
appendString(T49_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_7));
appendString(T49_, frmt);
failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T49_);
}
LA47_: ;
}
goto LA41_;
LA43_: ;
{
add__IM4kcMNkkOLJtqdEqSxR8A(&result, args[(NI)(j_2 - ((NI) 1))]);
}
LA41_: ;
}
break;
case 110:
{
/* "$n" -> newline. */
add__yG4AKzsBRS1W4MANDlXQeg(&result, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_8));
i += ((NI) 1);
}
break;
case 78:
{
/* "$N" -> newline (same as $n). */
add__yG4AKzsBRS1W4MANDlXQeg(&result, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_8));
i += ((NI) 1);
}
break;
default:
{
/* Any other character after '$' is an invalid directive. */
{
NimStringDesc* T58_;
if (!NIM_TRUE) goto LA56_;
T58_ = (NimStringDesc*)0;
T58_ = rawNewString((frmt ? frmt->Sup.len : 0) + 50);
appendString(T58_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_9));
appendString(T58_, frmt);
failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T58_);
}
LA56_: ;
}
break;
}
}
LA5_: ;
/* Scan the literal run up to the next '$' (or end of string). */
start = i;
{
while (1) {
if (!(i < length)) goto LA60;
{
if (!!(((NU8)(frmt->data[i]) == (NU8)(36)))) goto LA63_;
i += ((NI) 1);
}
goto LA61_;
LA63_: ;
{
goto LA59;
}
LA61_: ;
} LA60: ;
} LA59: ;
{
NimStringDesc* T70_;
if (!(start <= (NI)(i - ((NI) 1)))) goto LA68_;
T70_ = (NimStringDesc*)0;
T70_ = substr__2yh9cer0ymNRHlOOg8P7IuA(frmt, start, (NI)(i - ((NI) 1)));
add__yG4AKzsBRS1W4MANDlXQeg(&result, T70_);
}
LA68_: ;
} LA2: ;
}
return result;
}
/* `%` instance: format args with the fixed template TM_..._10. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UQfMnMPks8jKz20fTXQy9bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_10), args, argsLen_0);
}
/* Convert an int64 to its decimal string and intern it as a rope;
 * bumps the int-cache statistics counter. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, rope__KOisMGxcPhz6CcSmxgwEQQ)(NI64 i) {
NimStringDesc* text;
gCacheIntTries__opyfsNv023Md1P05mqsDew += ((NI) 1);
text = nimInt64ToStr(i);
return rope__yShmEg9cffWxI7s5XzEKBow(text);
}
/* `%` instance: format args with the fixed template TM_..._11. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KxpxlR6eqq3gRIOYTfR67w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_11), args, argsLen_0);
}
/* `%` instance: format args with the fixed template TM_..._12. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IFeEbVhQpPGgxkLehuSiBA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_12), args, argsLen_0);
}
/* `%` instance: format args with the fixed template TM_..._13. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BYiowJAm8zF7RBRISElaLg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_13), args, argsLen_0);
}
/* `%` instance: format args with the fixed template TM_..._14. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZkZcMxwzInnijXy5kz1K3A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_14), args, argsLen_0);
}
/* In-place prepend: a = b & a. */
N_LIB_PRIVATE N_NIMCALL(void, prepend__IM4kcMNkkOLJtqdEqSxR8A_2)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) { unsureAsgnRef((void**) (&(*a)), amp___ShdZ6VrAQkY0nWR9a39b9bGdQ(b, (*a)));
}
/* `string & rope`: convert the string operand to a rope, then concatenate. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___4cYKitaHx6RQ9azRtQsZp6w)(NimStringDesc* a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) {
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* aRope;
aRope = rope__yShmEg9cffWxI7s5XzEKBow(a);
return amp___ShdZ6VrAQkY0nWR9a39b9bGdQ(aRope, b);
}
/* Write a rope to a FILE* without flattening it into one big string:
 * same explicit-stack, left-spine-first traversal as dollar___, but each
 * leaf is streamed directly with write(). A nil rope writes nothing. */
N_LIB_PRIVATE N_NIMCALL(void, writeRope__FwuzOBq6SLlanVUstm8q9cA)(FILE* f, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r) { {
NimStringDesc* s;
s = (NimStringDesc*)0;
{
tySequence__WwUFq9cJ2xKRlsAWVEHyPRg* stack;
if (!!((r == NIM_NIL))) goto LA4_;
stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) newSeq((&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), 1);
asgnRef((void**) (&stack->data[0]), r);
{
/* Outer loop: process pending subtrees until the stack is empty. */
while (1) {
NI T8_;
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* it;
T8_ = (stack ? stack->Sup.len : 0);
if (!(((NI) 0) < T8_)) goto LA7;
it = pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem((&stack));
{
/* Inner loop: descend the left spine, deferring right children. */
while (1) {
NI T11_;
if (!!(((*it).left == NIM_NIL))) goto LA10;
stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) incrSeqV3((TGenericSeq*)(stack), (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_));
T11_ = stack->Sup.len++;
asgnRef((void**) (&stack->data[T11_]), (*it).right);
it = (*it).left;
} LA10: ;
}
/* `it` is now a leaf; stream its data to the file. */
s = (*it).data;
write__PArlm09bKklm2BLsCg6YtaA(f, s);
} LA7: ;
}
}
LA4_: ;
}
}
/* `%` instance: format args with the fixed template TM_..._15. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___G9aA37gQrW88KHzpCAwhgjQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_15), args, argsLen_0);
}
/* `%` instance: format args with the fixed template TM_..._16. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___PoDv5ydEvGdd9aiIF9cOiAPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_16), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vzbf0XksfaFTXNoTT6BCwA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_17), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lQVSDPkAFXHNoa1N7jYrNw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_18), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6d8an6hdqiIrRjPW1wEh5Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_19), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gMbiWAc0IjihIq46IYhmAw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_20), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uHsu7fLXac4OhMNd79bSJwA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_21), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3WM9b4PeyDKoIDFMvYcQX3w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_22), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___p4LhaCxKpUERrq9cB9b8Mp9cw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_23), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TbMwXzwNL7txOQADiTjwKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_24), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E0nDsXp7tY4mC1BnrrjWmA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_25), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mbjeaBETPixw9bUvyk31B6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_26), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AfR9bXoD9bcehKoM7F8O79bYA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_27), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nlZFDYB4M9bmBbYqEropRVw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_28), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dwsIkeXQe0E8HKrzN9aRE5A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_29), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fIR1FG0QPRsKvEYKq4tJUQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_30), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jADQs38xm62v1oxF2cSvEw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_31), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___DZV83DjWnQ9a19atC2oeswXg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_32), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sfvTjNjtOC86mU9bHczF6ow)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_33), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9ab1aKSDn70Vte0NcIItnaQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_34), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jadqNPnY9aM3oxYK6jarLrA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_35), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LvsIDF8olc08xBiqCYIUog)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_36), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6Tfa1iP1ENVlWbe89cSELSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_37), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hKg2Id9cvzE5Dgl9cU31c4Vw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_38), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___H3xXuIFdbz4MNb5T6BSfcQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_39), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ELXFo0GedkhGYj9bocTHZAg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_40), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9aLrcjgzGJE3f9ab2uR37jog)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_41), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3Q9c5iS9btBDBXZVoQktb1XQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_42), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MALQXTKXJv7x9a9c247satLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_43), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0nBiBCva6YS9a9bSV2Vr7Zxw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_44), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yyhPPkMkLJqWG6p8HGn9aoA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_45), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___t8gRNGR1flvaCNlBxuLn1A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_46), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xQaqlAwFuwxqBFixw7ewLg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_47), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2SWcbuU7RHQR0b8y9aJ9a5VQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_48), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gSgutt9b7GMWVGBkCt0UHAQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_49), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Vcuq0AWiVDndx4UH9cJ9cBRg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_50), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___l4wxq9cmPihXoF5xnDVNR1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_51), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zgEKWXsZtT6lqQ6XlgfrsA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_52), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uXZ30k0oJEqGPZW57O3dwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_53), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tTI9aMQiBZdiEeBIVh7QtYA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_54), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___VJBBlA9aMl5p0yYB1WzSMVg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_55), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jw4Sb0OSpKH1T5cLz7iyzA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_56), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0RQ2PINB4t8FjFlNUM6N9cQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_57), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LQ9bGxpANW8yeg5P9c0UYAaQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_58), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___f8tdlskieCnWysl9c9blzqZg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_59), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KbFpNe1pZ7hIuQi7dp1dSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_60), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nunbo9aB0HmmYQJ3InIBEzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_61), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RBxLok7DyUB0aHl9bxPIl9bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_62), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NARRjCd1x5Fr7NTTcoPRrw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_63), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NlLLwmZHOiJUpZfuk00AWA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_64), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mF9aI9b3hDjj53TD2C2gTrHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_65), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___PafMws9cJ9arr9a0RVMoIHmAw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_66), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3lAlmrWiRqEg9a9cd9a8kNhig)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_67), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___f8NIixSwWrk6SXQ3BFamWw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_68), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TTRh79a14hh1gb0owIP1Y6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_69), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TmeCjGna9cPfiHHcfqmKXjw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_70), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FsfRVuOOBePjn9cQ9aK7Vh1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_71), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___paA0sar8RKZqiwEaDfWo2A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_72), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jr9cXNQhhlLDfFJH4RSjeZg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_73), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EnzikEr9bDhOR6GYxWuYSwQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_74), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QqzUiJcAEZE2azDhIWHrgg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_75), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___20ZujjIFPkyqvS2OmenEAA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_76), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Vxo9ayk1xB18if39aZ1TBnKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_77), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NtQEfuK9bXszNTfYU57z19bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_78), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AKNexo4CH8G2vDeWW34Vpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_79), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LE3oWAmB5YDSDHm3LNHhCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_80), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___W83I2xs7lC32PrMs9bq4P2w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_81), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JKMGBJtXtDvc0NwxujFmZQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_82), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TA8WFV49atYpIneJatQWALw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_83), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nPenDL3j2Q6A1an1Cl3oCA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_84), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TNkzce2Sd9bck2QRtketc8A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_85), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kqRXw2WRJqDnfQK0N30ydw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_86), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BKnrQUIV2xGn2MO0RK09aUw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_87), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SCyrk9acEm3vLZhXCV1fGNg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_88), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___erDe9aYc2BNxzH9brKlmtEBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_89), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HSAgkeH84eiEd8MfKIuBQA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_90), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1AD3Wp47Hcdfg6PO2ac0NQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_91), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T11tCz9bIGT2CcftAwrDXZw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_92), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lS9bA1j3Ue6pp7sCliDsT8g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_93), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___M3h9cTlVBrj2vakKBqQRlMA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_94), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BBAyGuVoK6QA7nXfPUIYKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_95), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___g9b9arp3BWCGRHDe21SJso6w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_96), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___09aVguRR64dWfw4b6fKBcqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_97), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tgUnLdPVK0vRqC0pWxMClQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_98), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FBNsdfF5FNrY4P9cYQIfvZQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_99), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cB7zULPbG5vWWdCukRjdqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_100), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dpzmcz9a6kXbhFacdElIMOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_101), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AWFBEodxoi9a61KDUc9aiw1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_102), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vHbYzYlzLPcurSm0Hu8InQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_103), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nzT6Rke9c7tkW9b3XMmld2LA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_104), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cCc2iMcL3MEBZTTL3LCW1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_105), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ahBYcGrhpPvM5dTdzCQBrQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_106), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XI9awM9a9aQ9cB9bcS7uDRsa1Rw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_107), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cWNaGuyEpBbdBlD9b5nY1ug)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_108), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6P67I9czJ9aa9aZzVyYWUiGlw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_109), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S4jE5dFDtcCC8ODzxaJk6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_110), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Msid9awGKVeVe7p3v7WfNQA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_111), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xyRsdWsGY1DVVispyn0Xeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_112), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EPABzhs2B9atAvHV4CUTw2Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_113), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2MhCcipNmSHgcDtN4cr8ng)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_114), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0ul9cDZYl7YkH1RhZBTd9c6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_115), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QFf4DPoOk6Jy59cL2OASJzw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_116), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7yDHbEsisDNKcqQHIRgOuQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_117), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GwVmUG4AZCEAP8dBk4TGHg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_118), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___q7DaQZqCe0lRO0rhBWzM0w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_119), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hGIvKp3CGssDQ2vSvfksxQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_120), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9c1P82lz6H9anMKDbz1vYNpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_121), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dbg9bsMENUwtF9aO45wEGG3Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_122), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ym0Pr6z8A9ajyOAgotpd9a9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_123), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___izqbVTMtpY7kMiTK4bPJ6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_124), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rouofEnBX1ok9aMXmOsKdHg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_125), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___C3GQZbey70223GyG307UFg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_126), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yxmLIVRKySYknm2wSBp9cpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_127), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8u7UPO7ZpaMkWoJRtZLlYQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_128), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xXT7cKE1NTiL4U2MdlA2yQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_129), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___44q9ak51X9b9bmuZ9cK4LsFWOg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_130), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___77dMna2dOod5LqwYkRMZGg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_131), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QXMcmOst45ThYFLo9cOKDiQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_132), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zldA3DCxzpAhONjlfz7iIg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_133), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dnB3So2xw9c189c09a9cc9b4hxA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_134), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___r2gXVULKoAtQjkgjf0Z4wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_135), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___VsLzrOz1nS9cRBBz9ccZfETQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_136), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tRSKshYob5uzZE3eBVe59cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_137), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vcbf2lEZaiSjbAHwgt9aKXw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_138), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sb2NV56uvmvOtYkgVsaVQQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_139), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7STLi75js8HXlmFg7Abt9bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_140), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5O50gePV9adn3wgFGWjlOLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_141), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9a3Y7eeGNXkOCLUktwxzN9ag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_142), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Ng8dczn37bLzoM9bsVdPwjQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_143), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___boICAAvO1zkTlYDOuEaj6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_144), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LeuvM3mIc6pSNktpm9cHSVw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_145), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mxQQ2vwZhwfDagj5SEXeHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_146), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___x2NKZw9brJpylbwEtLfx9a9bg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_147), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TmT2Gs9cB7RN9cmo9c9cBpfKsA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_148), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RiPFNabSvay09bAW4Jic2ag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_149), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___efSHgbCUYoX1lUK7M9aj4Pg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_150), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Vmgih7rhd9cXUC9cEBz2cwXQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_151), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rB3209aHcqpT39anNUezpSjg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_152), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___x85Q1O2QUnYbstPlxUCyAQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_153), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___L3AeZ1n9aK4C1jsBCeaCmlQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_154), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ebmRHYtM9cCbYF6WvKDfQ9cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_155), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qE1JtEDDOvP6J49a9cv9aK1Dg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_156), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ctvQ2lU9b9bnVVpNP4GhIo2Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_157), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8bHx2qDxS2yWIId1X52mqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_158), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kTDR7D9c9aomjcaUQOmKJ9csg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_159), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1tj59chZC08k4TWYeZiqDnQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_160), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___533QKY9a8quvLM1SsLE1JfQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_161), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uFJUSitn9c1Tw6cF9cZf6x6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_162), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___G8iCcDovsaw25PkF7wHs0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_163), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SY4U2QvmoQxocaG8MOmyHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_164), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bhkFYKbURxGcJnKpswdr2Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_165), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lTsL0bi6njxzDh9c8A32r2w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_166), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___k4VEB3kaBL72FRQN8buzSg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_167), args, argsLen_0);
return result;
}
/*
 * NOTE(review): The functions below are machine-generated C produced by the
 * Nim compiler -- recognizable from the N_NIMCALL calling-convention macro,
 * the mangled `percent___*` names (Nim's `%` operator on ropes -- presumably;
 * confirm against the originating Nim module), and the `TM__*` string-constant
 * references.  All 87 wrappers are structurally identical: each takes an array
 * of rope pointers (`args`) plus its length (`argsLen_0`) and forwards both to
 * runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA together with exactly one distinct
 * format-string constant (TM__Vw9cfUOQOae9b9bzZBlucMZQg_168 through ..._254),
 * returning the rope that call produces.  The initial `result = NULL`
 * assignment in each body is dead (immediately overwritten) but is a harmless,
 * standard artifact of the Nim code generator.  Do not edit these by hand;
 * change the originating Nim source and regenerate instead.
 */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YbQIA9cHUESCyYT1WEeIVbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_168), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___66KauNYQRukYNgmb6bVXEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_169), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S550SlHmWbDpD7rs0J2lrA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_170), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sGnLi1DjaBomQ9c9a6MOCA5g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_171), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bEKtSmboScaCP8PPnlOWqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_172), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZpwWwpfBXgcQ6xoLOH4CJw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_173), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GHW5yjG8N9c2BQBun6aBJzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_174), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Yup67SPGRVcwMdmZwc9cSag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_175), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ec65mR1N7BSL9cmUa3z9czvA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_176), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ibyK70G44kCK9cN8nAkxyGA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_177), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___H9b69aGZGrLOiKWQdd30yQ9bg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_178), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Te7bvH18PbGe5siNJ9aDTTA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_179), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MUaBvSw0MHw3qQi9bYavAmg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_180), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bWYxjLMocXEvYgQQcC63rw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_181), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZpcNBrQMfioSvQNxKHhu9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_182), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gywCjjjPZobIva6liQWNLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_183), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6PDHoyz05lEjxGNE0k0ikw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_184), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AXGsBlGV5DoEOwPJSl9bdJw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_185), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ygzR9aJ6oM1bZTq4Z2lNO3Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_186), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uYVc6UX8hcaEdrHosUQAOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_187), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AlV8xJkjCXujAUesHxezgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_188), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___L9asecuKwevQN2h9cWzyv6oA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_189), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nZD9cadh12dcqTFsXBHbCRg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_190), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dz1JHdrf1p9bPB9ad2dZBtYw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_191), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0MUu7DVBoaLHTVUZe9bKoIA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_192), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___29aIWEGnJW0wnITIeSKWfFg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_193), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___n2CigWG38YNInkiL4n8g7A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_194), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bb3v9bDRLv9c9bcQzGH9c5H4Gw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_195), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tkJq8W3gQVDjuu9aT3THC6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_196), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oyQkqbRkRzo43y6iRevkaA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_197), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YuphtPwdJHG6BUJOVa9bX3w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_198), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EQxs5xa4FNWtMfcvmFZ9cMA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_199), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5YbjRZxm0g3SrdnL73aQaw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_200), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MEALpIIbc0cKMcjQ7Xckzg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_201), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yUc5o9ax9c9asIVNkfprLRPpA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_202), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4JrnABFfF3UTQ3nO9a6mXzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_203), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bkAwkKoaz09cAQo9arQjGA0A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_204), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7N9bV9cjVBHs9ciAhz7vgdI9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_205), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QX9cU2fNK0jJrZNDQKnAycA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_206), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vTbVjc6faJqdBrTckFLLWQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_207), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___v4k9cDtOUzGyUHJbnJ7kQKg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_208), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0ym49cR6ES8k9bYWsnh1fELA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_209), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Jx78R9a9anGvjjocCaP8YgIg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_210), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___s0lnM9cZDB9bOREa4Fx1leBw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_211), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aT7p9bNEmP3LxrK3OhspnSw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_212), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mV75vMLuQ8rrQEUzNz6llA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_213), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jhVz7tKuf0heLM2D3nL0gw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_214), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___c4YKWXetPKpaUUF7Qft2gA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_215), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rCIIoKC0OrXhpuTFTIZn0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_216), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lXaYcLcHHuQ46VvpH6Qr2A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_217), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___duX6hgjmpJtFFdvJVuoafg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_218), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GNSb4l0oRsR1gu66azz1LQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_219), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LGbUtKnsZL8FcQiQN7sWEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_220), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___e8Xf9ajw9cRlpuqnFnlEuSpA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_221), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nVQhtKHyPC8pvPbUAUBU7A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_222), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9bI5GhokFUA9bgO9av819cgdBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_223), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qTicKO8EMC9cWGOyybIz4WQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_224), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yZHx0qMqBvbhmZ0fMuAP6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_225), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YQzyPnY5vKAqE2RyLX0cew)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_226), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cIILAsA6BeRrvHfloZIscg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_227), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IwDTuHqkGn7wW16ga2ktSg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_228), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lbkoHJP5AIgE86vP7MmlKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_229), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9b84wNYrm79cLYfx9bsPNHjPQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_230), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___K5ihI3kW9cFBh6sKlfEpJwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_231), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nEiBK88oEGnvYfkiei9cyJA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_232), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Psy1qActyEYmIhrRo2KkJA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_233), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cZzkwYphs086zWiuLotXLA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_234), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kPsYd8d9cco3hhqO7CEAFeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_235), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BbOsdTh4ZRNKmiISHDyg3A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_236), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Py40oiVtYdIelNuiQQjpjw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_237), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QzVlk7tEXgagMWC19aLvbkg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_238), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qxufH5vUl9aY2l9cFq39bnVwA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_239), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jiTCvQQpgMU0bTrdVuECiw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_240), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___n4OrLXC1r9a83k5wz2NoWxQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_241), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bJpxHYPJaxWBQn6QxwBA4w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_242), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fOn9b5Ij3ytw2Ui9a2CPI5zw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_243), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zJU3FoYOdJ9bmuODPmqtgdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_244), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1MXpJAdeOMc2XMg5H7t9aSg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_245), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___VNAv31sqVgxrd9aXeFF5wYw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_246), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MULS9c8dKz2mJ1U9a9cMyTCYw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_247), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5TB09c2Iz60T0YagbSbI5RQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_248), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NIzUqj4Mr1E3EKy0AkJaXQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_249), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yQdCkIARIVr9aqI8oVxi9cQw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_250), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___WYvjnWcyRjjjI0lasIi1YA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_251), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hR4oq6WdDjEl0JIvQtvUlg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_252), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___84GQPNcrIJtbrzuA7JnMPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_253), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SqZEI7bxySjmJX4GsXyvKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_254), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___c1f569aWpTd825BTnv9bq4Xg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_255), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ibl3qMPOrpGT2x8X7vmbeQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_256), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bBcuDHMXr6Kz1tr7BzD9aKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_257), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aDvifvZOUmduC6Unfm69bKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_258), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5kuxCbMO8PVJc9aJbXScUOQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_259), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Uu9cBz7dxPVDFhF9aLzWecyQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_260), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___WWt3il4CHPiYP10KdNLrWw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_261), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hc7hMh137dtaNdd3qw28EQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_262), args, argsLen_0);
return result;
}
/* Nim compiler-generated code: do not edit by hand.  Each percent___*
 * function is a monomorphized rope `%` formatting operator instance that
 * forwards its arguments to runtimeFormat__... with one fixed format-string
 * literal (TM__..._NNN); only that literal differs between instances. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XWz49cQA2QiZaLkqHBU5L3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_263), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Au81R9a68Rv3gwlPtvDarPg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_264), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Yw741acxvsUs9cOX9cuiDj9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_265), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T9caGByKkBhaXSZ6fCJLIdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_266), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JmTWN8YiVKTZuvCYW2XNZA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_267), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Kbv8OIo8zpawh7SNMbfgkA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_268), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___B0OBOTOJQENvDd71LJ9b19bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_269), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___22ELRKd9bDuNug6qvIihS3A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_270), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ddrHnMlEhcHznkXv27msmQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_271), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yhJ9aDxHfJqHvWO0i6N9bukQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_272), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MLJpsW0DAZYB8lAgq09cUjg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_273), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8tWfSjtTOlDafxpQPvChAA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_274), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xKLwwPkFSVy2Dtn9cuJ78xw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_275), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hdRijZdoPR3UGq9aUw2zFDQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_276), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZjQc8bFVF8ePFYxjN0iVVg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_277), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SiqB8gWmdYKb4vtgqYrrMA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_278), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2Ixv9aZ9bvpNaVAVzYBJlUPg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_279), args, argsLen_0);
return result;
}
/* Nim compiler-generated code: do not edit by hand.  Each percent___*
 * function is a monomorphized rope `%` formatting operator instance that
 * forwards its arguments to runtimeFormat__... with one fixed format-string
 * literal (TM__..._NNN); only that literal differs between instances. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HoXSbgR7plMG7Fef0fcy9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_280), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___H1Ma2EXqegHnMqzJZ4SA1g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_281), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jpXTCDNVjIi5r4hbHN5SVQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_282), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4L62Yp9bLO2ZDcvBG9bSvP9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_283), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MCSdS9cTdQvttqiM9azLzkDg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_284), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E9bSTz8DQ4tgiLV9avQjFgFA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_285), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3CQpPXVDiNqC3jKO8Juliw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_286), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___w50CkyHBltcyR8rWxttZCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_287), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fmEfDTfNDkVDxWi9c0O6D2g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_288), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___k9bgPIs43oLgxnk1l4TNQaw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_289), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5MqeIopvDuA9aozxL79cQ88g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_290), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Zp9bMZDO5tEkvVLTxiKsBkA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_291), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___j5FZyaqnqjc2dcsUkAp28Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_292), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EbvvG9awBeRKzx8xuBIb7TA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_293), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9a8besSQa09cOOt9b9cgdVwY9aQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_294), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oVKF7oq59cRGAaMpvWzNWbw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_295), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7ru3bwKuSx4Sc8ilsBmX3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_296), args, argsLen_0);
return result;
}
/* Nim compiler-generated code: do not edit by hand.  Each percent___*
 * function is a monomorphized rope `%` formatting operator instance that
 * forwards its arguments to runtimeFormat__... with one fixed format-string
 * literal (TM__..._NNN); only that literal differs between instances. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MDIdJXTVckPj57aO7LMVgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_297), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vQDE0VOBftnrpkVsM9cme4w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_298), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9bmR9bM9b0qqEqU0QJKnmLQnA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_299), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___88tWbH31SmOWJjgJ7RnfHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_300), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___t1CB59bEwlxfHZhNwNNz1bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_301), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YbLM7ZajsWOFLl4iSo0Krg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_302), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rH7Ns9bqAnnfkukwBIlz9bKg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_303), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zx9ctq3Ffe9aysjoWhZOzevQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_304), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T9a21DAzFCa3OqRooKKtkqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_305), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Y4DThr9bpMbmoKpvgT1rYwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_306), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___811qrD9bMr21weOkImaKvIA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_307), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YNifhKTQWQRf1atK7E3Qmg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_308), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YfbBxPLyPvVS6F2y9bSUFIA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_309), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___OBvl4G6evYkvK9b9bClFGqNw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_310), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___pHsLkkx9bTDctZjmJqwCYRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_311), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ksH6NowTz9bh4eMOdyaiR1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_312), args, argsLen_0);
return result;
}
/* Fill `size` bytes starting at `a` with the byte value `v`.
 * Thin wrapper over memset; the return value of memset (which is just `a`)
 * is deliberately discarded. */
static N_INLINE(void, nimSetMem__zxfKBYntu9cBapkhrCOk1fgmemory)(void* a, int v, NI size) {
(void) memset(a, v, (size_t) size);
}
/* Zero `size` bytes starting at `p` by delegating to nimSetMem with fill
 * byte 0 (the C backend's lowering of Nim's zeroMem). */
static N_INLINE(void, nimZeroMem)(void* p, NI size) { nimSetMem__zxfKBYntu9cBapkhrCOk1fgmemory(p, ((int) 0), size);
}
static N_INLINE(NCSTRING, nimToCStringConv)(NimStringDesc* s) {
/* Return a C-string view of a Nim string.
   A nil or empty Nim string maps to the literal "" so callers always
   receive a valid, NUL-terminated pointer. */
if (s == NIM_NIL || (*s).Sup.len == ((NI) 0)) {
return "";
}
return ((NCSTRING) ((*s).data));
}
/* Compare the contents of rope 'r' against the remaining contents of
   open stream 'f' without materializing the rope as one string.
   Returns true iff the streamed bytes match the rope exactly AND the
   stream has no extra trailing bytes.
   Traversal: an explicit seq-based stack walks the rope tree iteratively
   (left spine descent, right children pushed), comparing each leaf's
   string against a 1024-byte read buffer. */
N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__9bihNFg7Qajcg9arfx5cr9aHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, FILE* f) { NIM_BOOL result;
tyArray__9bKy7UA2LOi2vzOViufaW1Q buf;
NI bpos;
NI blen;
NI btotal;
NI rtotal;
NIM_BOOL T27_;
NI T28_;
{ result = (NIM_BOOL)0;
nimZeroMem((void*)buf, sizeof(tyArray__9bKy7UA2LOi2vzOViufaW1Q));
/* bpos == blen == 1024 marks the buffer as exhausted, forcing a read
   on the first comparison iteration. */
bpos = ((NI) 1024);
blen = ((NI) 1024);
/* Running totals: bytes read from the file vs. bytes seen in the rope;
   they must match at the end for equality. */
btotal = ((NI) 0);
rtotal = ((NI) 0);
{
NimStringDesc* s;
s = (NimStringDesc*)0;
{
tySequence__WwUFq9cJ2xKRlsAWVEHyPRg* stack;
/* A nil rope contributes no bytes; skip straight to the EOF check. */
if (!!((r == NIM_NIL))) goto LA4_;
stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) newSeq((&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), 1);
asgnRef((void**) (&stack->data[0]), r);
{
/* Pop nodes until the traversal stack is empty. */
while (1) {
NI T8_;
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* it;
NI spos;
NI slen;
T8_ = (stack ? stack->Sup.len : 0);
if (!(((NI) 0) < T8_)) goto LA7;
it = pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem((&stack));
{
/* Descend the left spine, pushing right children for later. */
while (1) {
NI T11_;
if (!!(((*it).left == NIM_NIL))) goto LA10;
stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) incrSeqV3((TGenericSeq*)(stack), (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_));
T11_ = stack->Sup.len++;
asgnRef((void**) (&stack->data[T11_]), (*it).right);
it = (*it).left;
} LA10: ;
}
/* 'it' is now a leaf; compare its string data against the file. */
s = (*it).data;
spos = ((NI) 0);
slen = (s ? s->Sup.len : 0);
rtotal += slen;
{
while (1) {
NI n;
if (!(spos < slen)) goto LA13;
{
/* Buffer exhausted: refill from the file. */
if (!(bpos == blen)) goto LA16_;
bpos = ((NI) 0);
blen = readBuffer__Y9atVWUcVyKHG9aBP4D0P9czA_2(f, ((void*) ((&buf[(((NI) 0))- 0]))), ((NI) 1024));
btotal += blen;
{
/* File ended while rope data remains -> not equal. */
if (!(blen == ((NI) 0))) goto LA20_;
result = NIM_FALSE;
goto BeforeRet_;
}
LA20_: ;
}
LA16_: ;
/* Compare min(buffer remainder, leaf remainder) bytes. */
n = (((NI)(blen - bpos) <= (NI)(slen - spos)) ? (NI)(blen - bpos) : (NI)(slen - spos));
{
NIM_BOOL T24_;
T24_ = (NIM_BOOL)0;
T24_ = equalMem__9bGgqEk7RXXl6eqM9c1HdELAsystem(((void*) ((&buf[(bpos)- 0]))), ((void*) ((NI)(((NI) (nimToCStringConv(s))) + spos))), ((NI) (n)));
if (!!(T24_)) goto LA25_;
result = NIM_FALSE;
goto BeforeRet_;
}
LA25_: ;
spos += n;
bpos += n;
} LA13: ;
}
} LA7: ;
}
}
LA4_: ;
}
/* Equal only if the file is now at EOF (a 1-byte probe reads nothing)
   and the byte totals agree. */
T27_ = (NIM_BOOL)0;
T28_ = (NI)0;
T28_ = readBuffer__Y9atVWUcVyKHG9aBP4D0P9czA_2(f, ((void*) ((&buf[(((NI) 0))- 0]))), ((NI) 1));
T27_ = (T28_ == ((NI) 0));
if (!(T27_)) goto LA29_;
T27_ = (btotal == rtotal);
LA29_: ;
result = T27_;
}BeforeRet_: ;
return result;
}
N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__Wiam9c8x73Mtmbj0r4Ppikg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, NimStringDesc* filename) {
/* Open 'filename' for reading (FileMode 0) and compare its contents
   with rope 'r'. Returns false when the file cannot be opened. */
FILE* f = (FILE*)0;
NIM_BOOL ok = open__gq12VLhVO0NBzUTnGgz4nw(&f, filename, ((tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg) 0), ((NI) -1));
if (ok) {
ok = equalsFile__9bihNFg7Qajcg9arfx5cr9aHA(r, f);
close__fU6ZlJAtQ9bre04EDZLdGsA_3(f);
}
return ok;
}
N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, writeRope__LLRRC42xWBSkxzV9bsPu7lA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* head, NimStringDesc* filename) {
/* Write rope 'head' to 'filename' (FileMode 1, i.e. write/truncate).
   A nil rope yields an empty file. Returns true on success, false when
   the file could not be opened for writing. */
FILE* f = (FILE*)0;
if (!open__gq12VLhVO0NBzUTnGgz4nw(&f, filename, ((tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg) 1), ((NI) -1))) {
return NIM_FALSE;
}
if (!(head == NIM_NIL)) {
writeRope__FwuzOBq6SLlanVUstm8q9cA(f, head);
}
close__fU6ZlJAtQ9bre04EDZLdGsA_3(f);
return NIM_TRUE;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T3CpMgcFHzYracJ80CUZBQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._313 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_313), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6wQcdZnh9aH29ay5rwY6M5fA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._314 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_314), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y39ant8iE9bjKB0kbkRCAibQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._315 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_315), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RKXvZR1cmZW5dfjtFQCG3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._316 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_316), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nEA33x9cMfuJw3ZiGbn25iw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._317 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_317), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0xK6HolrLvVFWil73hZYbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._318 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_318), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Z2c9cvs0wVVVqTEZ3Qwe9bfw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._319 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_319), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AxDJCYpgPoquRsZtiOnpRw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._320 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_320), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dU9cenGIcVUltUO1088LhYQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._321 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_321), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TLpRy9aDJ1Ni4vccOIoiMbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._322 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_322), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RzB0z3UV9bb4kXUEGyS9crRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._323 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_323), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Z1QwTAihBHnxe59cytXnhmw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._324 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_324), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XZnCV59at0sqX6ShEjlFLgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._325 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_325), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YLzwVVtf4fuPYZVeMQOa0Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._326 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_326), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___CtS8L8cOLTsSuQ10mtHsvw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* NOTE: unlike its siblings this variant passes NIM_NIL as the format
   pattern (generated for an empty/nil pattern string); preserved as-is. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) NIM_NIL), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mPpmmd13MIZLTbd1oOdSkw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._327 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_327), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Th3qC4WgcAhWPSlLw7vZ9cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._328 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_328), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3RPy0XXevrEBMts1Mb9arGw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._329 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_329), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gqwqalZtiJtCgAF9bY5S6qQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._330 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_330), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___G9bYX9bu7ufcttiARCDUJ0qg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._331 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_331), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___W0CV9bE9bNiLgazfFZjoQCBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._332 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_332), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ecC7jlB6gBWrt0K9byHohPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._333 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_333), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hFzCKQOJ8Eao2AJk5HOvxA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._334 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_334), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___62079cK9bsws1aAJqEmAGo6w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._335 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_335), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hO1UTpWJhaojnhUyqfmgPQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._336 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_336), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___wlKCT75QSpBNooI9a2xvWeQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._8 (shared early constant, not part
   of the sequential _3xx run) to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_8), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uD0SC9bUeWpB9cK7V1aBT9aNQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._337 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_337), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Uez7zQbKzeDFToq2Yh43bA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._338 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_338), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JbygmsEkVsyK85BPVFvwbg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._339 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_339), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FLXrAGf7HFTHIGh8Xuickg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._340 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_340), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hmfCuT8fgBmRlPR25L7ZOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._341 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_341), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HUHatwko3S0fuszXQAOSQQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._342 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_342), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gGKEcvCOVzpTQoSXzO01Dw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._343 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_343), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LMnNsJkYlruXHnF5LV9c3pA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._344 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_344), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uJ11bTQ8dBBAX88A2cyICw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._345 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_345), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2D3IUNoEAKKLxuRqVNosPQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._346 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_346), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___o7SGM9buciKf5BOjTvMKA7w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._347 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_347), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Ht0mWR3LosfEZ8SopJcmEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._348 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_348), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GweM9byC8cQI9cehUzlYVs5A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._349 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_349), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Xnze9a4kYSwHurdPnhyNGzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._350 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_350), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sGaOrvR5YSM9cGUajaqcNOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._351 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_351), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GF60428RM29aXV0LYutm9aOA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._352 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_352), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ciTj4q9cGhcXiXY9bPemZVvw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._353 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_353), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HLoe040Vi0LPzmTid9aLGdw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._354 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_354), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tnP9cO5PduJRSEeqtm9bocEg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._355 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_355), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S6XcU2shl8EfYxL7utXbwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._356 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_356), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3GvB8fuMNh8BXF8IoORCxw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._357 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_357), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RhAtD9c9aECDorIc8rDhMF9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._358 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_358), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___CSdlEV0i9aXEHNuC1G9aIEbw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._359 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_359), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4SLS9cx2c8VCFIilepFlOeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._360 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_360), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___amX0pef5rA4JAmWZ6ZB2Nw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._361 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_361), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xAta147ahLKNrJMPPP5B6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._362 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_362), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sshAiIx49ba6saVSAWuyFuA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._363 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_363), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TmulmJw2SZspd0rz2PYvQw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._364 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_364), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UFeu00R8dNoyzL8vy54mnQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._365 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_365), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qYiwFpynEwFeSf3Aa2sS0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._366 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_366), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6xseTZmgyslBQb6RMm9b4wA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._367 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_367), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KsZXXO4zKP47iruPcSEryQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._368 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_368), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TUxzei0sBfo3GESRTg1T5w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._369 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_369), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ikDBM4Dyw9c2kuwAAswRyOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._370 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_370), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ht9cduX4yJQKi2Gi685ag5A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._371 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_371), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Wsnl5zC9cCEBdwJcHgpLf0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._372 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_372), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___deWmrKhbFG0MxH9cDr9cnhfQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._373 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_373), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HiCTlq0dXhMZvpDtUGWGQA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._374 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_374), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aagcnoz4kFWlzsoVgR9b0NQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._375 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_375), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oYhFcOWR4tEylepRJJLrlA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._376 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_376), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3RBmOS8xzFTxpuGVryQycg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._377 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_377), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___apXghcMDCUp9col7jN5spHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._378 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_378), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cNvJ1SVovK9b29bKmwKyiijw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._379 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_379), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0mbMVYCe5Qwl9aQOKV3sh3w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
/* Apply static format pattern TM_..._380 to the rope arguments. */
return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_380), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___03lrwELd9clj29bFkdXAVxkw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _381 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_381), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8croAZ6oMdSPXHbIisuppw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _382 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_382), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TDLJ9ciKDBoW4ouZs855Csg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _383 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_383), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Mk2KRdMWX4H3L9aBEG2elgQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _384 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_384), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___pFXgvxsz2L5f27ImZwJwzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _385 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_385), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___n9aTlv49bCxoRKQNZiWsaW2g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _386 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_386), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y3oNivo8px1XzxmB9b2OY5g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _387 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_387), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Pnqkcr360suaX84kwXMuCA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _388 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_388), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FA4ohw0aOufzzLhmw9aUAhA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _389 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_389), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SWZi8EY4Pz39bBPSp9cbtZMg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _390 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_390), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XaBXRInsoVU7DBc2WK8dzg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _391 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_391), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NdMO5d09brFwLfDc8ciTSqQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _392 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_392), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E62TlyqwqpEwqcA0YTjttw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _393 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_393), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___m4T7v0qnGpOgwmMenKcgwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _394 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_394), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SKTmZPSgcdPr3Du3ia9b9czg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _395 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_395), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ItxAXpnPzfUbYRPsHgKrPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _396 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_396), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ggqZXIgPaS71ubw22cYODw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _397 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_397), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LLnl4aDVJynim7LQvfJKLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _398 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_398), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Ob6yLhv7QvbU9bdZj8Nw2kA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _399 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_399), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qfsHROU9aHSaYGq3tpw1XDg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _400 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_400), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___j9bcJJvtd9bur0VZUQL3ibgA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _401 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_401), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___32ITt7hKDrhn9bXvKbmnE9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _402 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_402), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZAOkVi5SmgPcGpCSuSRXVA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _403 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_403), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___smDIOmjGgf8ZP9bfDyv43bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _404 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_404), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1jtIbjhXi2wH1iWPyC9bgAQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _405 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_405), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NPgb4kECDcV8MICSil6Rjw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _406 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_406), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cQHGAtgSLYV7mm9bnVGYGRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _407 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_407), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Q4LBu2cVl8IcNTrtxd6B6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _408 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_408), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___M36w8F9bFwighD3K39bvtVWw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _409 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_409), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Wm11wQtuJBQgTy9a39apz0eA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _410 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_410), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0bUw514mSumiNnSjkD0bqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _411 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_411), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6hxDi5nlebu1DFLqpYq5lw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _412 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_412), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GkWgkK8SyjrFfWjGRwKWrw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _413 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_413), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oubCLvBtU9aRB9bhG2vbCDeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _414 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_414), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KTcAQx04UE87HYZ48ZBm2A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _415 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_415), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Y6zpqvbZwK8tJZiKs9agbGw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _416 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_416), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2OGTIxEeE0xFVRpz5TxKyg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _417 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_417), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0xZtTB2eXM1dRd9aneL5VPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _418 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_418), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___amO46kEKgIeOmW50ayV6nA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _419 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_419), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3lABfXU9aXZsyfylYizY8KA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _420 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_420), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5JCQx3oDHEcLdsEz6Rx0Rw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _421 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_421), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dTtf7fil83VcW2Mkkr7scw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _422 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_422), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___88NG6Rr5xfTcA6hqLfZ2iw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _423 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_423), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1DWSTPxvqlc4A2xRDmjZDw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _424 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_424), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y5Z6ewsHLxj9ctzxTLPCLmw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _425 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_425), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___CHBd5pGE9c8nq4KNqM8K48g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _426 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_426), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y2h2X887dhz5sEoD4C8ezQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _427 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_427), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dQfg2HrsVY6E7P22Nis1MA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _428 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_428), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0b2Bm7vpM8YAMKp9cuAwg3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _429 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_429), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1Hh3EN9c4pkzdKB09bo9c9aTBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _430 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_430), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AOSgPOjXfsLWEICRXv3U2g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _431 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_431), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gN4yb6p4ql6iVJOPAjLEJQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _432 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_432), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___WIg2bxfQLkmzIdOv1JkRqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _433 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_433), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3Klw9agVDELeF44OQ6PnRiA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _434 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_434), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LL6jCaqBGLwC1sCgmCAEhQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _435 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_435), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S9b9bs03lj0NJlhXUmrylsnA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _436 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_436), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fphSfWWyYSWLARtGIpYB9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _437 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_437), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___As9aDT7fkqstj16MQnIGPhA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _438 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_438), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___eAZ21NmzzIsugeSSkcxIkQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _439 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_439), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___D2dSwFjTnRSmeKOoMm6w0Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _440 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_440), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HlU9bV2X0HOPcGJnQlGm9c9aQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _441 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_441), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___p2lIQAdDBUpuVZML6ecUOg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _442 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_442), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5hzyGWCNjqgqPj0O7sSnkg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _443 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_443), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___l1wvVBeU1Nnie8cWddgPCA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _444 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_444), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yVZN2jQzbJwg3E9cehLff9cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _445 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_445), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E9br9b8BVYaWzg6CXcn9c6EXw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _446 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_446), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qPugJ1Nc2L1EdGwEF0AJ0Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _447 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_447), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HzZyrXo2QFynm1T8X76cCw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _448 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_448), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___O2nyVw4tGD6MMc6u7I9bH9cA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _449 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_449), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IQYZUimFiAV9axFM9c64hKjA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _450 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_450), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RCJU8UTq9cE0Jsi59anAbTIQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _451 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_451), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S6vmSaSCgC4V2L5H7OWeZg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _452 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_452), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Eqr9cgWCkrZrUG3sg0CawIQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _453 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_453), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9c1lq60gbfPY9cyjQN4YouTQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _454 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_454), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1nMXoOe6cENU7004pnh6wQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _455 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_455), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ALynLzo8zWvno8ZxASdm4A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _456 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_456), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tlkWMVJPsx9aWUbp8FMjQ4w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _457 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_457), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xPW5KjObCPL2lJmHFoqfjg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {
	/* Generated Nim `%` wrapper: tail-call runtimeFormat with string constant _458 (dead `result` init removed). */
	return runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_458), args, argsLen_0);
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mTh2rYVPWUnI8B7kU3NWUg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_459), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aoMj8hrcFi4HlPDZ9a9alpig)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_460), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Yj64cHk9ajrzJI39bfpBfOVA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_461), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9bY6R9buTsrqJYQAuD39cegOA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_462), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___U9b6hkqS6N7XIWr0gy8z9bug)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_463), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2MwhwhkHOiavfXQl9aey8nA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_464), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JRV6DlpqdegYGLcFjNPv0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_465), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ryMkoQkM4zAjyp0800DrDQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_466), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___iW9bjdQoXkul7L0e76qo8XQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_467), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___i3z9am8Hzy69bSo575pRdzGw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_468), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SkAQPSnCyiRvin57XULW4A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_469), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Bym8FwH29aQE8fth9ar38yJQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_470), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___CbbQqCp6itJgwKVRfTr69ag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_471), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HWgoOloM1oqcI9aZ9bEkoBhg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_472), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Anf1UHjOzz9aHgMOgtnEPZA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_473), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tDrtnFWakp63hyE9cfImgZw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_474), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JwpI2xnYNfR68HstfDi1yQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_475), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___23SvbIxPpf5MIOga79arr6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_476), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uVZXJGmbOGIG9bfkI4ZDwJQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_477), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UxL9a0Hh7Km0Z0DIk7hp9cBA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_478), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QxiH9aM0po7vA19b2s1CjdEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_479), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FZt89ajG3TKAhfL9aW4s7hcA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_480), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5GaE39bOOeQZy3EFOEIy5QA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_481), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SA9cvbR3uc9cP50nnaEBJctw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_482), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KweYGQ9bFYg76nmoxpk8ksA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_483), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AhY63HjLy2bPe9bslUNBuBQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_484), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3m7YwdrxIvOkmvfnm5JYSA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_485), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TEWiK8QWtRTCIQ9av7sW8LA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_486), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9an6bUHwpxqyL2kgNHX3MEg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_487), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kLwAORKb0c4oFgFTN9aEN8Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_488), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Qm29ctdy9c4sqKctTsqiBWIg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_489), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UyNt2Asj9aa2ScoGVo9cCnNw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_490), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xXvQyblNYV215UGR9cTka7Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_491), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LYjQOKn1i9ccw8AFlvPGkCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_492), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___THj0xNXkqJf6reD7exsGbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_493), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3oFXAbir9c7XcKzu9bpgAM9bA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_494), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4sbi76q7ZLqpKbD3pwJ59bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_495), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Q9cOQGrP4lOdbYHXMQ1yZtg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_496), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0AX4Q6cA8nOXUagvzFqt0A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_497), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qQ3g8SwjZoIFAay85NaiEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_498), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___M0TByFCTj9bbOkDSRpFz3LA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_499), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___OikfyLf8HmjI9auYLFoaVqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_500), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3KVF9aLACI1h11BqZrkzjNg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_501), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ial810twbEzfkHaHMFYNCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_502), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Z7wCJf0WipOQOQ4ZZNBIEw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_503), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Xpm9cGf2grEXdjAQV9arqWBQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_504), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sqxyWwlLrfrdyc9b3BINcXQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_505), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ztLQ2Orupb9b9b3KrCvoK9cbQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_506), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___PI6febxsdTbySkLsIEqHKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_507), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sGRyuC9caCxfdM1i8W4fjgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_508), args, argsLen_0);
return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vWWA89aSvs5QwAFN4Jdr2IA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_509), args, argsLen_0);
return result;
}
/* Write rope `r` to `filename` only when the file's current contents differ
 * from the rope. Returns NIM_FALSE when the file already matches (nothing
 * written); otherwise returns the result of the write operation. */
N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, writeRopeIfNotEqual__Wiam9c8x73Mtmbj0r4Ppikg_2)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, NimStringDesc* filename) {
	NIM_BOOL result = NIM_FALSE;
	NIM_BOOL alreadyEqual = equalsFile__Wiam9c8x73Mtmbj0r4Ppikg(r, filename);
	if (!alreadyEqual) {
		/* Contents differ: perform the write and report its outcome. */
		result = writeRope__LLRRC42xWBSkxzV9bsPu7lA(r, filename);
	}
	return result;
}
/* Further Nim-compiler-generated rope `%` formatting wrappers (format
 * strings TM__..._510 through TM__..._540). Same shape as the wrappers
 * above: each forwards its fixed format string plus the caller-supplied
 * argument array to runtimeFormat and returns the resulting rope.
 * NOTE(review): generated code — regenerate from the Nim sources rather
 * than editing by hand. */
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lQuk161wRVxbYxfH80Iwcw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_510), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UQrwMIIitnm9cEflSXdCkPg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_511), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___A9aKFJUF6ZjJQfrcPHJigOQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_512), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8ehuHmXS8omgqFrdYMsPBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_513), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2Opo6JkHmCRmDA87qcGfvg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_514), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___C7jQ1fH79bR8HRQrbJjFKDg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_515), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2eu2gmgXiDUZkBgTVqD7pg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_516), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cCI1wZSoDB14achJW7ZFSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_517), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dkLAWa1dMAcGEAyfUZ59bRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_518), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___DuvwOyJJ9b2gpVM9cV7DCFSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_519), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4MBgNtJLOyqbjfGytl2OTw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_520), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___336bx9aXX7GZckfWQE5Jy3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_521), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IbsmsXdtDOH7pLpzh9cmAOA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_522), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cGelOO9b6sliTnobJf6XAsg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_523), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aNorSJCSJyyDo7w0s6eynA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_524), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BYRFs7dwiqyMIzbsx9cDq8Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_525), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TavFv5xK0dxxJCk9b4v34zg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_526), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9aAWQyBOqadJYgBT29bzliAw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_527), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zpFS2Xy9cmoAoqCFSUQj1gg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_528), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Nz9cwOtMmcX2gklRogKhyEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_529), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YGYo0XYmypYw3N26AYh7ug)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_530), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___e8Z4ajz6IErIB0a6mpq4Wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_531), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___eqn09cqDPu9csxGUOSa2untg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_532), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rZ5o6ziDKz4d3bfaN54Dgg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_533), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YGa4o1aenD9cjoU03CAgtqQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_534), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___b2PLtFwpZkVmYhHWvW4i1Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_535), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ctY4Nx9aQFC9bl9c2wbRLoFYA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_536), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xsFAphqq4CRpmuZ79bXVLrA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_537), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SSpcZv60d0mAp5H4Mb5hpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_538), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TtzOadDB4I9a89cWej19a2PNg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_539), args, argsLen_0);
	return result;
}
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KKiSvh9a121M0uSQjcJhhMg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) {	tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result;
	result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0;
	result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_540), args, argsLen_0);
	return result;
}
/* Module initializer for the generated ropes unit: registers the module's
 * global-root marker with the Nim GC and resets the rope-cache statistics
 * counters to zero. Called once at program startup by the generated
 * main-module init sequence. */
N_LIB_PRIVATE N_NIMCALL(void, compiler_ropesInit000)(void) {
{
	/* Make the GC aware of this module's global rope references. */
	nimRegisterGlobalMarker(TM__Vw9cfUOQOae9b9bzZBlucMZQg_3);
	/* Cache-statistics counters start at zero. */
	gCacheTries__5GfZTThHPBfB9bjRZdFluBw = ((NI) 0);
	gCacheMisses__fLRm9am8S0daYBVNK6JKyBg = ((NI) 0);
	gCacheIntTries__opyfsNv023Md1P05mqsDew = ((NI) 0);
}
}
/* Data initializer for compiler/ropes (Nim codegen): builds the runtime type
 * information (RTTI) for RopeObj -- an object node (kind 2) with four field
 * nodes (kind 1): left, right, L, data -- plus the ref-to-RopeObj type
 * (kind 22, with its GC marker) and the rope-array type (kind 16).
 * Auto-generated; do not hand-edit. */
N_LIB_PRIVATE N_NIMCALL(void, compiler_ropesDatInit000)(void) {
static TNimNode* TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[4];
static TNimNode TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[5];
NTI__OFzf0kSiPTcNreUIeJgWVA_.size = sizeof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA);
NTI__OFzf0kSiPTcNreUIeJgWVA_.kind = 17;
NTI__OFzf0kSiPTcNreUIeJgWVA_.base = (&NTI__ytyiCJqK439aF9cIibuRVpAg_);
/* node 0 is the object header; nodes 1..4 describe the fields */
TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[0] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1];
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].kind = 1;
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, left);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].typ = (&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].name = "left";
TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[1] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2];
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].kind = 1;
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, right);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].typ = (&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].name = "right";
TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[2] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3];
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].kind = 1;
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, L);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].typ = (&NTI__rR5Bzr1D5krxoo1NcNyeMA_);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].name = "L";
TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[3] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4];
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].kind = 1;
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, data);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].typ = (&NTI__77mFvmsOLKik79ci2hXkHEg_);
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].name = "data";
TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0].len = 4; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0].kind = 2; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0].sons = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[0];
NTI__OFzf0kSiPTcNreUIeJgWVA_.node = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0];
/* ref RopeObj: pointer type with a GC marker */
NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.size = sizeof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*);
NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.kind = 22;
NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.base = (&NTI__OFzf0kSiPTcNreUIeJgWVA_);
NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.marker = Marker_tyRef__4hi0XQqK9aLiPuWT9acsXm9aQ;
/* fixed-size array of rope refs */
NTI__USLYl0Lpkimm4FABiJ3ldA_.size = sizeof(tyArray__USLYl0Lpkimm4FABiJ3ldA);
NTI__USLYl0Lpkimm4FABiJ3ldA_.kind = 16;
NTI__USLYl0Lpkimm4FABiJ3ldA_.base = (&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_);
}
|
convolution_1x1_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 1x1 stride-1 convolution, pack4, MSA path.
// A 1x1/stride-1 convolution is a plain matrix multiply over every spatial
// position, so present the input as a single row of w*h positions and hand
// it to the packed im2col/sgemm kernel. The Mat copy is a shallow view:
// only the w/h bookkeeping changes, the pixel data is shared.
static void conv1x1s1_sgemm_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int spatial = bottom_blob.w * bottom_blob.h;

    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = spatial;
    bottom_im2col.h = 1;

    im2col_sgemm_pack4_msa(bottom_im2col, top_blob, kernel, _bias, opt);
}
// 1x1 stride-2 convolution, pack4, MSA path.
// Strategy: first "shrink" the input by gathering every second pixel in x
// and every second row in y into a dense outw-by-outh blob, then run the
// stride-1 sgemm kernel on the shrunken input (a 1x1/stride-2 conv equals a
// 1x1/stride-1 conv on the subsampled image).
static void conv1x1s2_sgemm_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
    int outw = top_blob.w;
    int outh = top_blob.h;
    // After consuming outw pixels with stride 2 we have advanced 2*outw
    // packed pixels; tailstep skips the remainder of this row plus the next
    // (skipped) row: (w - 2*outw + w) packed pixels, 4 floats each (pack4).
    const int tailstep = (w - 2 * outw + w) * 4;
    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const float* r0 = bottom_blob.channel(p);
        float* outptr = bottom_blob_shrinked.channel(p);
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // copy one pack4 pixel, then step over the next one (stride 2)
                v4f32 _val = (v4f32)__msa_ld_w(r0, 0);
                __msa_st_w((v4i32)_val, outptr, 0);
                r0 += 4 * 2;
                outptr += 4;
            }
            r0 += tailstep;
        }
    }
    conv1x1s1_sgemm_pack4_msa(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
GB_binop__islt_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_fp64)
// A*D function (colscale): GB (_AxD__islt_fp64)
// D*A function (rowscale): GB (_DxB__islt_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_fp64)
// C=scalar+B GB (_bind1st__islt_fp64)
// C=scalar+B' GB (_bind1st_tran__islt_fp64)
// C=A+scalar GB (_bind2nd__islt_fp64)
// C=A'+scalar GB (_bind2nd_tran__islt_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_FP64 || GxB_NO_ISLT_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, B are all dense, op = ISLT_FP64. The numeric work
// lives in the shared template, specialized by the GB_* macros above.
// Auto-generated file: do not hand-edit.
GrB_Info GB (_Cdense_ewise3_noaccum__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this operator was compiled out via GxB_NO_* — caller falls back to generic
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with ISLT_FP64,
// sliced by B_ek_slicing across B_ntasks/B_nthreads. Template-specialized.
GrB_Info GB (_Cdense_accumB__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into a dense
// matrix C with ISLT_FP64. Template-specialized; auto-generated.
GrB_Info GB (_Cdense_accumb__islt_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the braced block above always returns; kept
// verbatim because this file is generated from Generator/*.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D (ISLT_FP64),
// writing results directly into C->x. Template-specialized; auto-generated.
GrB_Info GB (_AxD__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D (ISLT_FP64).
// Template-specialized; auto-generated.
GrB_Info GB (_DxB__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B with op ISLT_FP64 applied where the
// patterns intersect. Per-task slicings are declared as workspace and freed
// by GB_FREE_WORK after the template runs. Auto-generated.
GrB_Info GB (_AaddB__islt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B with optional mask, C sparse/hyper,
// op ISLT_FP64. All logic in the shared meta template. Auto-generated.
GrB_Info GB (_AemultB_08__islt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full, op ISLT_FP64. GB_BINOP_FLIP is 0 for ISLT (a flipped variant
// exists, ISGT), so only the non-flipped template branch is compiled in.
GrB_Info GB (_AemultB_02__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full, op ISLT_FP64. Work sliced by M_ek_slicing. Auto-generated.
GrB_Info GB (_AemultB_04__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where C is bitmap: C=A.*B with optional (possibly complemented)
// mask, op ISLT_FP64. Auto-generated; template-specialized.
GrB_Info GB (_AemultB_bitmap__islt_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the FIRST operand of ISLT and apply
// it to every present entry of B. Cx and Bx may alias (entry p is read
// before it is written). Entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__islt_fp64)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
const double x = (*((double *) x_input)) ;
const double *Bx = (const double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// entries not present in the bitmap are left untouched
if (!GBB (Bb, p)) continue ;
Cx [p] = (x < GBX (Bx, p, false)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the SECOND operand of ISLT and apply
// it to every present entry of A. Cx and Ax may alias (entry p is read
// before it is written). Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__islt_fp64)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
const double *Ax = (const double *) Ax_input ;
const double y = (*((double *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// entries not present in the bitmap are left untouched
if (!GBB (Ab, p)) continue ;
Cx [p] = (GBX (Ax, p, false) < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply ISLT with scalar x bound first.
GrB_Info GB (_bind1st_tran__islt_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef  GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): dead at runtime (after return), but the preprocessor still
// processes these lines, restoring GB_ATYPE for the rest of the file.
#undef  GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply ISLT with scalar y bound second.
GrB_Info GB (_bind2nd_tran__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <omp.h>
#include <assert.h>
#define N 10000// max number of elements
#define NDEBUG
#ifndef NDEBUG
#define DEBUG(cmd) cmd;
#else
#define DEBUG(cmd) ;
#endif
// Read whitespace-separated integers from `filename` into arr (capacity N).
// Returns the number of values read. Exits with a diagnostic on open
// failure or overflow: the previous version relied on assert() for both,
// which disappears under NDEBUG (leaving a NULL-stream fscanf and an
// unbounded write), and it checked the bound only AFTER writing.
int read_from_file(char* filename, int* arr)
{
    FILE* fin = fopen(filename, "r");
    if (!fin)
    {
        fprintf(stderr, "ERROR: Cannot open file %s\n", filename);
        exit(EXIT_FAILURE);
    }
    int value = 0;
    int n = 0;
    // Compare against 1, not EOF: on a non-numeric token fscanf returns 0
    // without consuming input, so `!= EOF` spun forever on bad data.
    while (fscanf(fin, "%d", &value) == 1)
    {
        if (n >= N)
        {
            fprintf(stderr, "ERROR: Too many elements in %s (max %d)\n", filename, N);
            fclose(fin);
            exit(EXIT_FAILURE);
        }
        arr[n++] = value;
    }
    fclose(fin);
    return n;
}
// Prompt for and read n integers from stdin into arr.
// Fix: the previous version ignored scanf's return value, so on EOF or a
// non-numeric token it silently stored the stale `value` for every
// remaining slot. Now it reports the problem and stops early (the caller
// zero-initializes arr, so untouched slots remain 0).
void read_from_keyboard(int n, int* arr)
{
    printf("Enter numbers: ");
    for (int i = 0; i < n; i++)
    {
        int value = 0;
        if (scanf("%d", &value) != 1)
        {
            fprintf(stderr, "ERROR: Invalid or missing input at element %d\n", i);
            return;
        }
        arr[i] = value;
    }
}
// Task-parallel quicksort on arr[low..high] using Hoare partitioning.
// Each recursive half is spawned as an OpenMP task; the arr/n arguments are
// captured with the usual task data-sharing rules, low/j/high explicitly
// firstprivate. Must be called from inside an active parallel region (see
// sort()). Note: Hoare partition leaves the pivot anywhere; the recursion
// is on [low..j] and [j+1..high], not [low..j-1]/[j+1..high].
void sort_task(int arr[N], int n, int low, int high)
{
    // empty or single-element range (or defensive low < 0): nothing to do
    if (low < 0 || low >= high)
        return;
    DEBUG(
        int thread_id = omp_get_thread_num();
        printf("[Thread %.2d] %d %d\n", thread_id, low, high);
    )
    // Hoare partition around the middle element
    int pivot = arr[(high + low) / 2];
    int i = low - 1;
    int j = high + 1;
    while (true)
    {
        do {} while (arr[++i] < pivot);
        do {} while (arr[--j] > pivot);
        if (i >= j)
            break;
        int tmp = arr[i];
        arr[i] = arr[j];
        arr[j] = tmp;
    }
    // sort both halves concurrently; no taskwait needed because the caller
    // only observes the array after the enclosing parallel region ends
    #pragma omp task firstprivate(low, j)
    sort_task(arr, n, low, j);
    #pragma omp task firstprivate(j, high)
    sort_task(arr, n, j + 1, high);
}
// Entry point for the parallel quicksort: opens a parallel region, lets one
// thread (single) seed the root task, and the implicit barrier at the end
// of the parallel region guarantees all recursively spawned tasks have
// completed before sort() returns.
void sort(int arr[N], int n, int low, int high)
{
    #pragma omp parallel
    {
        #pragma omp single
        {
            #pragma omp task firstprivate(low, high)
            sort_task(arr, n, low, high);
        }
    }
}
// CLI driver: reads integers either from a file (-f FILE) or from stdin
// (-n COUNT), sorts them with the task-parallel quicksort, and prints the
// result. Fixes: an unrecognized flag previously printed the usage message
// but then FELL THROUGH to assert(n) and aborted — it now returns an error
// code like the wrong-argc path. The never-used sarr[] buffer is removed.
int main(int argc, char** argv)
{
    int n = 0;        // number of elements actually read
    int arr[N] = {0}; // data to sort

    if (argc != 3)
    {
        fprintf(stderr, "ERROR: Wrong input.\nExample ./run.out [-f (unknown)] [-n {length < 10000}]\n");
        return -1;
    }

    if (strcmp(argv[1], "-f") == 0)
    {
        n = read_from_file(argv[2], arr);
    }
    else if (strcmp(argv[1], "-n") == 0)
    {
        n = atoi(argv[2]);
        assert(n < N);
        read_from_keyboard(n, arr);
    }
    else
    {
        fprintf(stderr, "ERROR: Wrong input.\nExample ./run.out [-f (unknown)] [-n {length < 10000}]\n");
        return -1;
    }

    assert(n);
    sort(arr, n, 0, n - 1);

    for (int i = 0; i < n; i++)
    {
        printf("%d ", arr[i]);
    }
    printf("\n");
    return 0;
}
|
shared_array.h | #ifndef OPENMC_SHARED_ARRAY_H
#define OPENMC_SHARED_ARRAY_H
//! \file shared_array.h
//! \brief Shared array data structure
#include <memory>
namespace openmc {
//==============================================================================
// Class declarations
//==============================================================================
// This container is an array that is capable of being appended to in an
// thread safe manner by use of atomics. It only provides protection for the
// use cases currently present in OpenMC. Namely, it covers the scenario where
// multiple threads are appending to an array, but no threads are reading from
// or operating on it in any other way at the same time. Multiple threads can
// call the thread_safe_append() function concurrently and store data to the
// object at the index returned from thread_safe_append() safely, but no other
// operations are protected.
template <typename T>
class SharedArray {

public:
  //==========================================================================
  // Constructors

  //! Default constructor.
  SharedArray() = default;

  //! Construct a zero size container with space to hold capacity number of
  //! elements.
  //
  //! \param capacity The number of elements for the container to allocate
  //! space for
  SharedArray(int64_t capacity) : capacity_(capacity)
  {
    data_ = std::make_unique<T[]>(capacity);
  }

  //==========================================================================
  // Methods and Accessors

  //! Return a reference to the element at specified location i. No bounds
  //! checking is performed.
  T& operator[](int64_t i) {return data_[i];}
  const T& operator[](int64_t i) const { return data_[i]; }

  //! Allocate space in the container for the specified number of elements.
  //! reserve() does not change the size of the container.
  //
  //! \param capacity The number of elements to allocate in the container
  //
  //! NOTE(review): unlike std::vector::reserve, this discards any existing
  //! elements (the old buffer is replaced, not copied) — callers must treat
  //! it as a (re)initialization, not a growth operation.
  void reserve(int64_t capacity)
  {
    data_ = std::make_unique<T[]>(capacity);
    capacity_ = capacity;
  }

  //! Increase the size of the container by one and append value to the
  //! array. Returns an index to the element of the array written to. Also
  //! tests to enforce that the append operation does not read off the end
  //! of the array. In the event that this does happen, set the size to be
  //! equal to the capacity and return -1.
  //
  //! \value The value of the element to append
  //! \return The index in the array written to. In the event that this
  //! index would be greater than what was allocated for the container,
  //! return -1.
  int64_t thread_safe_append(const T& value)
  {
    // Atomically capture the index we want to write to
    int64_t idx;
    #pragma omp atomic capture
    idx = size_++;

    // Check that we haven't written off the end of the array
    // (size_ may transiently exceed capacity_ while several threads race
    // past the end; each loser clamps it back and reports failure)
    if (idx >= capacity_) {
      #pragma omp atomic write
      size_ = capacity_;
      return -1;
    }

    // Copy element value to the array
    data_[idx] = value;

    return idx;
  }

  //! Free any space that was allocated for the container. Set the
  //! container's size and capacity to 0.
  void clear()
  {
    data_.reset();
    size_ = 0;
    capacity_ = 0;
  }

  //! Return the number of elements in the container
  //! (not synchronized — see class comment: callers must not read while
  //! other threads are appending)
  int64_t size() {return size_;}

  //! Resize the container to contain a specified number of elements. This is
  //! useful in cases where the container is written to in a non-thread safe manner,
  //! where the internal size of the array needs to be manually updated.
  //
  //! \param size The new size of the container
  void resize(int64_t size) {size_ = size;}

  //! Return the number of elements that the container has currently allocated
  //! space for.
  int64_t capacity() {return capacity_;}

  //! Return pointer to the underlying array serving as element storage.
  T* data() {return data_.get();}
  const T* data() const {return data_.get();}

private:
  //==========================================================================
  // Data members

  std::unique_ptr<T[]> data_; //!< An RAII handle to the elements
  int64_t size_ {0}; //!< The current number of elements
  int64_t capacity_ {0}; //!< The total space allocated for elements
};
} // namespace openmc
#endif // OPENMC_SHARED_ARRAY_H
|
GB_binop__bxnor_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_int64)
// C=scalar+B GB (_bind1st__bxnor_int64)
// C=scalar+B' GB (_bind1st_tran__bxnor_int64)
// C=A+scalar GB (_bind2nd__bxnor_int64)
// C=A'+scalar GB (_bind2nd_tran__bxnor_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = ~((aij) ^ (bij))
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ~((x) ^ (y)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_INT64 || GxB_NO_BXNOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B, all three matrices dense, op = BXNOR_INT64. Unlike the older
// generated variants this version returns void and has no GB_DISABLE guard
// here (disabling is handled by the caller). Auto-generated; do not edit.
void GB (_Cdense_ewise3_noaccum__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with
// BXNOR_INT64, sliced by B_ek_slicing. Template-specialized; auto-generated.
GrB_Info GB (_Cdense_accumB__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (type-erased via p_bwork) into a dense matrix
// C with BXNOR_INT64. Template-specialized; auto-generated.
GrB_Info GB (_Cdense_accumb__bxnor_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the braced block above always returns; kept
// verbatim because this file is generated from Generator/*.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B (optionally masked) with op BXNOR_INT64. When
// is_eWiseUnion is true, alpha/beta scalars substitute for entries missing
// from A/B respectively; otherwise the two scalar locals stay unused.
// Workspace slicings are freed by GB_FREE_WORKSPACE. Auto-generated.
GrB_Info GB (_AaddB__bxnor_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B with optional mask, C sparse/hyper,
// op BXNOR_INT64. All logic in the shared meta template. Auto-generated.
GrB_Info GB (_AemultB_08__bxnor_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full, op BXNOR_INT64. GB_BINOP_FLIP is 0 (BXNOR is commutative),
// so only the non-flipped template branch is compiled in.
GrB_Info GB (_AemultB_02__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full, with the operator z = ~(x ^ y) (bxnor, int64).  All of the
// work is performed by the included GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__bxnor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,                 // mask, required for this method
    const bool Mask_struct,             // if true, use only the structure of M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap, with the
// operator z = ~(x ^ y) (bxnor, int64).  All of the work is performed by the
// included GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bxnor_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,             // if true, use only the structure of M
    const bool Mask_comp,               // if true, the mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = bxnor (x, bij) with the scalar x bound as the first argument:
// Cx [p] = ~(x ^ Bx [p]) for every entry present in B.  Entries absent from
// the bitmap Bb are skipped; the corresponding Cx slot is left untouched.
GrB_Info GB (_bind1st__bxnor_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    const int64_t *Bx = (const int64_t *) Bx_input ;
    const int64_t x = (*((const int64_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // process only the entries present in B
        if (GBB (Bb, p))
        {
            const int64_t bval = GBX (Bx, p, false) ;
            Cx [p] = ~((x) ^ (bval)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = bxnor (aij, y) with the scalar y bound as the second argument:
// Cx [p] = ~(Ax [p] ^ y) for every entry present in A.  Entries absent from
// the bitmap Ab are skipped; the corresponding Cx slot is left untouched.
GrB_Info GB (_bind2nd__bxnor_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    const int64_t *Ax = (const int64_t *) Ax_input ;
    const int64_t y = (*((const int64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // process only the entries present in A
        if (GBB (Ab, p))
        {
            const int64_t aval = GBX (Ax, p, false) ;
            Cx [p] = ~((aval) ^ (y)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template below:
// it computes C(i,j) = ~(x ^ A(j,i)) while transposing A.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ~((x) ^ (aij)) ;                  \
}
// C = op (x, A'): transpose A and apply bxnor with scalar x bound first.
// All of the work is performed by the included GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__bxnor_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // generated boilerplate: re-establish GB_ATYPE (same definition) for any
    // code that follows this kernel in the generated file
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template below:
// it computes C(i,j) = ~(A(j,i) ^ y) while transposing A.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ~((aij) ^ (y)) ;                  \
}
// C = op (A', y): transpose A and apply bxnor with scalar y bound second.
// All of the work is performed by the included GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__bxnor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
filter.c | /************************************************************************************
File: filter.c
Includes the functions required to filter the page.
***********************************************************************************/
#include <stdio.h>
#include <math.h>
#include "pixmap.h"
#include <omp.h>
#define NUM_IT 100
/* Copy page in_page in page out_page */
/***************************************************/
/* Duplicate the pixel matrix of in_page into out_page.
   Assumes out_page already has (at least) in_page's h x w dimensions. */
void copy_page(page *in_page, page *out_page)
{
    for (int row = 0; row < in_page->h; row++) {
        for (int col = 0; col < in_page->w; col++) {
            out_page->im[row][col] = in_page->im[row][col];
        }
    }
}
/* Apply the filter to a page */
/***********************************************************************************/
double filter_page (page *in_page, page *out_page)
{
    /*
     * Applies one pass of a 3x3 box (mean) filter to out_page, in place,
     * over the interior pixels; the one-pixel border is left untouched.
     * Returns the average filtered pixel value minus the minimum filtered
     * value, which the caller uses as a convergence measure.
     *
     * Note: in_page is unused; the filter iterates directly on out_page
     * (the caller primes out_page with copy_page before the first pass).
     *
     * NOTE(review): the stencil reads neighbours of out_page that other
     * threads may already have rewritten, so the parallel result is not
     * bit-identical to the sequential one (Gauss-Seidel-like in-place
     * update).  Confirm this relaxation is acceptable for the exercise.
     */
    double min = 257;   /* sentinel above any 8-bit pixel value -- TODO confirm range */
    double avg = 0;
    /* guard against degenerate pages with no interior pixels */
    long interior = (long)(out_page->h - 2) * (out_page->w - 2);
    if (interior <= 0) return 0;
    #pragma omp parallel for schedule(dynamic, 4) reduction(min:min) reduction(+:avg)
    for (int i = 1; i < out_page->h - 1; i++) {
        for (int j = 1; j < out_page->w - 1; j++) {
            /* read the full 3x3 neighbourhood before writing the centre */
            double sum = 0;
            for (int di = -1; di <= 1; di++)
                for (int dj = -1; dj <= 1; dj++)
                    sum += out_page->im[i + di][j + dj];
            double value = sum / 9;
            out_page->im[i][j] = (int) value;
            if (value < min) min = value;
            avg += value;
        }
    }
    /* bug fix: average over the (h-2)*(w-2) interior pixels actually
       filtered, not (h-1)*(w-1) as before */
    avg /= (double) interior;
    return avg - min;
}
/* generate the filtered page */
/**************************************/
void generate_filtered_page ( page *in_page, page *out_page, float limit)
{
    /*
     * Produces the filtered page in out_page by repeatedly applying
     * filter_page, stopping when either:
     *   (a) NUM_IT iterations have been performed, or
     *   (b) the (average - minimum) value returned by filter_page is
     *       no longer greater than limit.
     */
    generate_page(out_page, in_page->h, in_page->w, BLACK);
    copy_page(in_page, out_page);
    double avgMinus = 0;
    int i = 0;
    do {
        avgMinus = filter_page(in_page, out_page);
        i += 1;
        printf("%d -> %f\n", i, avgMinus);
    /* bug fix: honour the NUM_IT iteration cap required by condition (a);
       previously NUM_IT was defined but never used, so the loop could run
       forever when avgMinus never drops below limit */
    } while (avgMinus > limit && i < NUM_IT);
}
|
Example_teams.6.c | /*
* @@name: teams.6c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
extern void init(float *, float *, int);
extern void output(float *, int);
// Element-wise vector multiply p[i] = v1[i] * v2[i], offloaded to a target
// device: v1 and v2 are mapped to the device, p is mapped back, and the loop
// is distributed across teams and parallelized/SIMD-ized within each team.
void vec_mult(float *p, float *v1, float *v2, int N)
{
   int i;
   init(v1, v2, N);
   // map(to:) copies the inputs to the device; map(from:) copies p back.
   // v2[:N] is shorthand for v2[0:N].
   #pragma omp target teams map(to: v1[0:N], v2[:N]) map(from: p[0:N])
   #pragma omp distribute parallel for simd
   for (i=0; i<N; i++)
     p[i] = v1[i] * v2[i];
   output(p, N);
}
|
flux_avx512.c | #include <stddef.h>
#include <string.h>
#include <stdint.h>
#include <omp.h>
#include <mathimf.h>
#include <immintrin.h>
#include "geometry.h"
#include "bench.h"
#include "phy.h"
#include "core_kernel.h"
#define MAG0 (0.5 / 3)
#define MAG1 (-MAG0)
static void
_KRN_ComputeFlux(
const size_t nfnodes,
const uint32_t bsz,
const uint32_t *nfptr,
const double *f_xyz0,
const double *f_xyz1,
const double *f_xyz2,
const uint32_t *ie,
const uint32_t *part,
const uint32_t *n0,
const uint32_t *n1,
const double *x0,
const double *x1,
const double *x2,
const double *x3,
const double *q,
const size_t dofs,
const size_t snfc,
const uint32_t *snfic,
const double *xyz0,
const double *xyz1,
const double *xyz2,
const uint32_t *sn0,
const uint32_t *sn1,
const uint32_t *sn2,
const double *w0termsx,
const double *w0termsy,
const double *w0termsz,
const double *w1termsx,
const double *w1termsy,
const double *w1termsz,
double *gradx0,
double *gradx1,
double *gradx2,
double *r)
{
memset(gradx0, 0, dofs * sizeof(double));
memset(gradx1, 0, dofs * sizeof(double));
memset(gradx2, 0, dofs * sizeof(double));
memset(r, 0, dofs * sizeof(double));
/*
Calculates the gradients at the nodes using weighted least squares
This solves using Gram-Schmidt
*/
#pragma omp parallel
{
const uint32_t t = (unsigned int) omp_get_thread_num();
const uint32_t ie0 = ie[t];
const uint32_t ie1 = ie[t+1];
uint32_t i;
for(i = ie0; i < ie1; i++)
{
const uint32_t node0 = n0[i];
const uint32_t node1 = n1[i];
const uint32_t idx0 = (unsigned int) bsz * node0;
const uint32_t idx1 = (unsigned int) bsz * node1;
double dq;
double termx;
double termy;
double termz;
if(part[node0] == t)
{
termx = w0termsx[i];
termy = w0termsy[i];
termz = w0termsz[i];
dq = q[idx1 + 0] - q[idx0 + 0];
gradx0[idx0 + 0] += termx * dq;
gradx1[idx0 + 0] += termy * dq;
gradx2[idx0 + 0] += termz * dq;
dq = q[idx1 + 1] - q[idx0 + 1];
gradx0[idx0 + 1] += termx * dq;
gradx1[idx0 + 1] += termy * dq;
gradx2[idx0 + 1] += termz * dq;
dq = q[idx1 + 2] - q[idx0 + 2];
gradx0[idx0 + 2] += termx * dq;
gradx1[idx0 + 2] += termy * dq;
gradx2[idx0 + 2] += termz * dq;
dq = q[idx1 + 3] - q[idx0 + 3];
gradx0[idx0 + 3] += termx * dq;
gradx1[idx0 + 3] += termy * dq;
gradx2[idx0 + 3] += termz * dq;
}
if(part[node1] == t)
{
termx = w1termsx[i];
termy = w1termsy[i];
termz = w1termsz[i];
dq = q[idx0 + 0] - q[idx1 + 0];
gradx0[idx1 + 0] += termx * dq;
gradx1[idx1 + 0] += termy * dq;
gradx2[idx1 + 0] += termz * dq;
dq = q[idx0 + 1] - q[idx1 + 1];
gradx0[idx1 + 1] += termx * dq;
gradx1[idx1 + 1] += termy * dq;
gradx2[idx1 + 1] += termz * dq;
dq = q[idx0 + 2] - q[idx1 + 2];
gradx0[idx1 + 2] += termx * dq;
gradx1[idx1 + 2] += termy * dq;
gradx2[idx1 + 2] += termz * dq;
dq = q[idx0 + 3] - q[idx1 + 3];
gradx0[idx1 + 3] += termx * dq;
gradx1[idx1 + 3] += termy * dq;
gradx2[idx1 + 3] += termz * dq;
}
}
}
/*
Calculates the fluxes on the face and performs the flux balance
*/
/* AVX512 Registers */
const __m512d _zero = _mm512_set1_pd(0);
const __m512d _pos1 = _mm512_set1_pd(1.0);
const __m512d _pos2 = _mm512_set1_pd(2.0);
const __m512d _half = _mm512_set1_pd(0.5);
const __m512d _nhalf = _mm512_set1_pd(-0.5);
const __m512d _nu95 = _mm512_set1_pd(0.95);
const __m512d _beta = _mm512_set1_pd(B);
#ifdef ARCH_SKY
//const __m512d _rbeta = _mm512_rcp14_pd(_beta);
const __m512d _rbeta = _mm512_div_pd(_pos1, _beta);
#else
const __m512d _rbeta = _mm512_rcp28_pd(_beta);
#endif
const __m256i _bsz = _mm256_set1_epi32(bsz);
const __m256i _shift1 = _mm256_set1_epi32(1);
const __m256i _shift2 = _mm256_set1_epi32(2);
const __m256i _shift3 = _mm256_set1_epi32(3);
const __m512i _ng = _mm512_set1_epi32(-1);
const __m512d _und = _mm512_undefined_pd();
#pragma omp parallel
{
const uint32_t t = omp_get_thread_num();
const uint32_t ie0 = ie[t];
const uint32_t ie1 = ie[t+1];
const uint32_t lim = ie1 - ((ie1-ie0) % 8);
const __m512i _t = _mm512_set1_epi32(t);
uint32_t i;
for(i = ie0; i < lim; i+=8)
{
const __m512d _xn = _mm512_load_pd((void const *) &x0[i]);
const __m512d _yn = _mm512_load_pd((void const *) &x1[i]);
const __m512d _zn = _mm512_load_pd((void const *) &x2[i]);
const __m512d _ln = _mm512_load_pd((void const *) &x3[i]);
/*
Now lets get our other 2 vectors
For first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} is close to unity, use {0,1,0}
*/
const __m512d _fdot = _mm512_abs_pd(_xn);
__mmask _k0;
__m512d _dot, _X1, _Y1, _Z1;
_k0 = _mm512_cmp_pd_mask(_fdot, _nu95, _CMP_LT_OS);
_X1 = _mm512_mask_fnmadd_pd(_xn, _k0, _xn, _pos1);
_Y1 = _mm512_mask_fnmadd_pd(_yn, _k0, _xn, _zero);
_Z1 = _mm512_mask_fnmadd_pd(_zn, _k0, _xn, _zero);
_k0 = _mm512_cmp_pd_mask(_fdot, _nu95, _CMP_GE_OS);
_X1 = _mm512_mask_fnmadd_pd(_X1, _k0, _yn, _zero);
_Y1 = _mm512_mask_fnmadd_pd(_Y1, _k0, _yn, _pos1);
_Z1 = _mm512_mask_fnmadd_pd(_Z1, _k0, _yn, _zero);
/*
Normalize the first vector
*/
__m512d _size;
_size = _mm512_mul_pd(_X1, _X1);
_size = _mm512_fmadd_pd(_Y1, _Y1, _size);
_size = _mm512_fmadd_pd(_Z1, _Z1, _size);
#ifdef ARCH_SKY
_size = _mm512_sqrt_pd(_size);
_size = _mm512_div_pd(_pos1, _size);
//_size = _mm512_rsqrt14_pd(_size);
#else
_size = _mm512_rsqrt28_pd(_size);
#endif
_X1 = _mm512_mul_pd(_X1, _size);
_Y1 = _mm512_mul_pd(_Y1, _size);
_Z1 = _mm512_mul_pd(_Z1, _size);
const __m256i _n0 = _mm256_load_si256((__m256i const *) &n0[i]);
const __m256i _n1 = _mm256_load_si256((__m256i const *) &n1[i]);
const __m512d _x00 = _mm512_i32gather_pd(_n0, &xyz0[0], 8);
const __m512d _x01 = _mm512_i32gather_pd(_n0, &xyz1[0], 8);
const __m512d _x02 = _mm512_i32gather_pd(_n0, &xyz2[0], 8);
const __m512d _x10 = _mm512_i32gather_pd(_n1, &xyz0[0], 8);
const __m512d _x11 = _mm512_i32gather_pd(_n1, &xyz1[0], 8);
const __m512d _x12 = _mm512_i32gather_pd(_n1, &xyz2[0], 8);
const __m512d _xmean = _mm512_mul_pd(_half, _mm512_add_pd(_x00, _x10));
const __m512d _ymean = _mm512_mul_pd(_half, _mm512_add_pd(_x01, _x11));
const __m512d _zmean = _mm512_mul_pd(_half, _mm512_add_pd(_x02, _x12));
/*
Take cross-product of normal and V1 to get V2
*/
const __m512d _X2 = _mm512_fmsub_pd(_yn, _Z1, _mm512_mul_pd(_zn, _Y1));
const __m512d _Y2 = _mm512_fmsub_pd(_zn, _X1, _mm512_mul_pd(_xn, _Z1));
const __m512d _Z2 = _mm512_fmsub_pd(_xn, _Y1, _mm512_mul_pd(_yn, _X1));
/*
Compute the stride indices
*/
const __m256i _idx0 = _mm256_mullo_epi32(_bsz, _n0);
const __m256i _idx1 = _mm256_mullo_epi32(_bsz, _n1);
const __m256i _idx01 = _mm256_add_epi32(_idx0, _shift1);
const __m256i _idx11 = _mm256_add_epi32(_idx1, _shift1);
const __m256i _idx02 = _mm256_add_epi32(_idx0, _shift2);
const __m256i _idx12 = _mm256_add_epi32(_idx1, _shift2);
const __m256i _idx03 = _mm256_add_epi32(_idx0, _shift3);
const __m256i _idx13 = _mm256_add_epi32(_idx1, _shift3);
/*
Get variables on "left" and "right" side of face
*/
__m512d _q;
__m512d _ubarL, _ubarR;
__m512d _rx, _ry, _rz;
__m512d _g0, _g1, _g2;
__m512d _pL, _uL, _vL, _wL;
__m512d _pR, _uR, _vR, _wR;
/* Left */
_rx = _mm512_sub_pd(_xmean, _x00);
_ry = _mm512_sub_pd(_ymean, _x01);
_rz = _mm512_sub_pd(_zmean, _x02);
/* Pressure */
_g0 = _mm512_i32gather_pd(_idx0, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx0, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx0, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx0, &q[0], 8);
_pL = _mm512_fmadd_pd(_g0, _rx, _q);
_pL = _mm512_fmadd_pd(_g1, _ry, _pL);
_pL = _mm512_fmadd_pd(_g2, _rz, _pL);
/* Velocity u */
_g0 = _mm512_i32gather_pd(_idx01, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx01, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx01, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx01, &q[0], 8);
_uL = _mm512_fmadd_pd(_g0, _rx, _q);
_uL = _mm512_fmadd_pd(_g1, _ry, _uL);
_uL = _mm512_fmadd_pd(_g2, _rz, _uL);
/* Velocity v */
_g0 = _mm512_i32gather_pd(_idx02, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx02, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx02, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx02, &q[0], 8);
_vL = _mm512_fmadd_pd(_g0, _rx, _q);
_vL = _mm512_fmadd_pd(_g1, _ry, _vL);
_vL = _mm512_fmadd_pd(_g2, _rz, _vL);
/* Velocity w */
_g0 = _mm512_i32gather_pd(_idx03, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx03, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx03, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx03, &q[0], 8);
_wL = _mm512_fmadd_pd(_g0, _rx, _q);
_wL = _mm512_fmadd_pd(_g1, _ry, _wL);
_wL = _mm512_fmadd_pd(_g2, _rz, _wL);
_ubarL = _mm512_mul_pd(_xn, _uL);
_ubarL = _mm512_fmadd_pd(_yn, _vL, _ubarL);
_ubarL = _mm512_fmadd_pd(_zn, _wL, _ubarL);
/* Right */
_rx = _mm512_sub_pd(_xmean, _x10);
_ry = _mm512_sub_pd(_ymean, _x11);
_rz = _mm512_sub_pd(_zmean, _x12);
/* Pressure */
_g0 = _mm512_i32gather_pd(_idx1, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx1, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx1, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx1, &q[0], 8);
_pR = _mm512_fmadd_pd(_g0, _rx, _q);
_pR = _mm512_fmadd_pd(_g1, _ry, _pR);
_pR = _mm512_fmadd_pd(_g2, _rz, _pR);
/* Velocity u */
_g0 = _mm512_i32gather_pd(_idx11, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx11, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx11, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx11, &q[0], 8);
_uR = _mm512_fmadd_pd(_g0, _rx, _q);
_uR = _mm512_fmadd_pd(_g1, _ry, _uR);
_uR = _mm512_fmadd_pd(_g2, _rz, _uR);
/* Velocity v */
_g0 = _mm512_i32gather_pd(_idx12, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx12, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx12, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx12, &q[0], 8);
_vR = _mm512_fmadd_pd(_g0, _rx, _q);
_vR = _mm512_fmadd_pd(_g1, _ry, _vR);
_vR = _mm512_fmadd_pd(_g2, _rz, _vR);
/* Velocity w */
_g0 = _mm512_i32gather_pd(_idx13, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx13, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx13, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx13, &q[0], 8);
_wR = _mm512_fmadd_pd(_g0, _rx, _q);
_wR = _mm512_fmadd_pd(_g1, _ry, _wR);
_wR = _mm512_fmadd_pd(_g2, _rz, _wR);
_ubarR = _mm512_mul_pd(_xn, _uR);
_ubarR = _mm512_fmadd_pd(_yn, _vR, _ubarR);
_ubarR = _mm512_fmadd_pd(_zn, _wR, _ubarR);
const __m512d _dp = _mm512_sub_pd(_pR, _pL);
const __m512d _du = _mm512_sub_pd(_uR, _uL);
const __m512d _dv = _mm512_sub_pd(_vR, _vL);
const __m512d _dw = _mm512_sub_pd(_wR, _wL);
/* Compute averages for velocity variables only */
const __m512d _u = _mm512_mul_pd(_half, _mm512_add_pd(_uL, _uR));
const __m512d _v = _mm512_mul_pd(_half, _mm512_add_pd(_vL, _vR));
const __m512d _w = _mm512_mul_pd(_half, _mm512_add_pd(_wL, _wR));
__m512d _ubar;
_ubar = _mm512_mul_pd(_xn, _u);
_ubar = _mm512_fmadd_pd(_yn, _v, _ubar);
_ubar = _mm512_fmadd_pd(_zn, _w, _ubar);
/* Compute Phi's */
__m512d _phi1;
_phi1 = _mm512_mul_pd(_xn, _beta);
_phi1 = _mm512_fmadd_pd(_u, _ubar, _phi1);
__m512d _phi2;
_phi2 = _mm512_mul_pd(_yn, _beta);
_phi2 = _mm512_fmadd_pd(_v, _ubar, _phi2);
__m512d _phi3;
_phi3 = _mm512_mul_pd(_zn, _beta);
_phi3 = _mm512_fmadd_pd(_w, _ubar, _phi3);
__m512d _phi4;
_phi4 = _mm512_mul_pd(_Z2, _phi2);
_phi4 = _mm512_fmsub_pd(_Y2, _phi3, _phi4);
__m512d _phi5;
_phi5 = _mm512_mul_pd(_X2, _phi3);
_phi5 = _mm512_fmsub_pd(_Z2, _phi1, _phi5);
__m512d _phi6;
_phi6 = _mm512_mul_pd(_Y2, _phi1);
_phi6 = _mm512_fmsub_pd(_X2, _phi2, _phi6);
__m512d _phi7;
_phi7 = _mm512_mul_pd(_Y1, _phi3);
_phi7 = _mm512_fmsub_pd(_Z1, _phi2, _phi7);
__m512d _phi8;
_phi8 = _mm512_mul_pd(_Z1, _phi1);
_phi8 = _mm512_fmsub_pd(_X1, _phi3, _phi8);
__m512d _phi9;
_phi9 = _mm512_mul_pd(_X1, _phi2);
_phi9 = _mm512_fmsub_pd(_Y1, _phi1, _phi9);
/*
Compute eigenvalues, eigenvectors, and strengths
*/
const __m512d _c2 = _mm512_fmadd_pd(_ubar, _ubar, _beta);
#ifdef ARCH_SKY
//const __m512d _c = _mm512_mul_pd(_mm512_rsqrt14_pd(_c2), _c2);
const __m512d _c = _mm512_sqrt_pd(_c2);
//const __m512d _c2r = _mm512_rcp14_pd(_c2);
const __m512d _c2r = _mm512_div_pd(_pos1, _c2);
#else
const __m512d _c = _mm512_mul_pd(_mm512_rsqrt28_pd(_c2), _c2);
const __m512d _c2r = _mm512_rcp28_pd(_c2);
#endif
const __m512d _bac = _mm512_add_pd(_ubar, _c);
const __m512d _bsc = _mm512_sub_pd(_ubar, _c);
/*
Components of T(inverse)
*/
__m512d _ti11;
_ti11 = _mm512_mul_pd(_u, _phi4);
_ti11 = _mm512_fmadd_pd(_v, _phi5, _ti11);
_ti11 = _mm512_fmadd_pd(_w, _phi6, _ti11);
_ti11 = _mm512_fnmadd_pd(_ti11, _rbeta, _zero);
__m512d _ti21;
_ti21 = _mm512_mul_pd(_u, _phi7);
_ti21 = _mm512_fmadd_pd(_v, _phi8, _ti21);
_ti21 = _mm512_fmadd_pd(_w, _phi9, _ti21);
_ti21 = _mm512_fnmadd_pd(_ti21, _rbeta, _zero);
__m512d _ti31;
_ti31 = _mm512_mul_pd(_half, _mm512_sub_pd(_c, _ubar));
_ti31 = _mm512_mul_pd(_ti31, _rbeta);
__m512d _ti41;
_ti41 = _mm512_mul_pd(_nhalf, _bac);
_ti41 = _mm512_mul_pd(_ti41, _rbeta);
/*
jumps (T(inverse) * dq)
*/
__m512d _dv1;
_dv1 = _mm512_mul_pd(_ti11, _dp);
_dv1 = _mm512_fmadd_pd(_phi4, _du, _dv1);
_dv1 = _mm512_fmadd_pd(_phi5, _dv, _dv1);
_dv1 = _mm512_fmadd_pd(_phi6, _dw, _dv1);
_dv1 = _mm512_mul_pd(_dv1, _c2r);
__m512d _dv2;
_dv2 = _mm512_mul_pd(_ti21, _dp);
_dv2 = _mm512_fmadd_pd(_phi7, _du, _dv2);
_dv2 = _mm512_fmadd_pd(_phi8, _dv, _dv2);
_dv2 = _mm512_fmadd_pd(_phi9, _dw, _dv2);
_dv2 = _mm512_mul_pd(_dv2, _c2r);
__m512d _dv34;
_dv34 = _mm512_mul_pd(_xn, _du);
_dv34 = _mm512_fmadd_pd(_yn, _dv, _dv34);
_dv34 = _mm512_fmadd_pd(_zn, _dw, _dv34);
__m512d _dv3;
_dv3 = _mm512_fmadd_pd(_mm512_mul_pd(_pos2, _ti31), _dp, _dv34);
_dv3 = _mm512_mul_pd(_dv3, _mm512_mul_pd(_half, _c2r));
__m512d _dv4;
_dv4 = _mm512_fmadd_pd(_mm512_mul_pd(_pos2, _ti41), _dp, _dv34);
_dv4 = _mm512_mul_pd(_dv4, _mm512_mul_pd(_half, _c2r));
/*
Now get elements of T
*/
const __m512d _r13 = _mm512_mul_pd(_c, _beta);
__m512d _r23;
_r23 = _mm512_mul_pd(_u, _bac);
_r23 = _mm512_fmadd_pd(_xn, _beta, _r23);
__m512d _r33;
_r33 = _mm512_mul_pd(_v, _bac);
_r33 = _mm512_fmadd_pd(_yn, _beta, _r33);
__m512d _r43;
_r43 = _mm512_mul_pd(_w, _bac);
_r43 = _mm512_fmadd_pd(_zn, _beta, _r43);
const __m512d _r14 = _mm512_fnmadd_pd(_c, _beta, _zero);
__m512d _r24;
_r24 = _mm512_mul_pd(_u, _bsc);
_r24 = _mm512_fmadd_pd(_xn, _beta, _r24);
__m512d _r34;
_r34 = _mm512_mul_pd(_v, _bsc);
_r34 = _mm512_fmadd_pd(_yn, _beta, _r34);
__m512d _r44;
_r44 = _mm512_mul_pd(_w, _bsc);
_r44 = _mm512_fmadd_pd(_zn, _beta, _r44);
/*
Calculate T* |lambda| * T(inverse)
*/
const __m512d _eig1 = _mm512_abs_pd(_ubar);
const __m512d _eig2 = _mm512_abs_pd(_bac);
const __m512d _eig3 = _mm512_abs_pd(_bsc);
__m512d _t1;
_t1 = _mm512_mul_pd(_mm512_mul_pd(_eig2, _r13), _dv3);
_t1 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r14), _dv4, _t1);
__m512d _t2;
_t2 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _X1), _dv1);
_t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _X2), _dv2, _t2);
_t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r23), _dv3, _t2);
_t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r24), _dv4, _t2);
__m512d _t3;
_t3 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _Y1), _dv1);
_t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _Y2), _dv2, _t3);
_t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r33), _dv3, _t3);
_t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r34), _dv4, _t3);
__m512d _t4;
_t4 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _Z1), _dv1);
_t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _Z2), _dv2, _t4);
_t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r43), _dv3, _t4);
_t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r44), _dv4, _t4);
/*
Modify to calculate .5(fl +fr) from nodes
instead of extrapolated ones
*/
/* Left Side */
__m512d _fluxp1;
_fluxp1 = _mm512_mul_pd(_mm512_mul_pd(_ln, _beta), _ubarL);
__m512d _fluxp2;
_fluxp2 = _mm512_mul_pd(_uL, _ubarL);
_fluxp2 = _mm512_fmadd_pd(_xn, _pL, _fluxp2);
_fluxp2 = _mm512_mul_pd(_ln, _fluxp2);
__m512d _fluxp3;
_fluxp3 = _mm512_mul_pd(_vL, _ubarL);
_fluxp3 = _mm512_fmadd_pd(_yn, _pL, _fluxp3);
_fluxp3 = _mm512_mul_pd(_ln, _fluxp3);
__m512d _fluxp4;
_fluxp4 = _mm512_mul_pd(_wL, _ubarL);
_fluxp4 = _mm512_fmadd_pd(_zn, _pL, _fluxp4);
_fluxp4 = _mm512_mul_pd(_ln, _fluxp4);
/* Right Side */
__m512d _fluxm1;
_fluxm1 = _mm512_mul_pd(_mm512_mul_pd(_ln, _beta), _ubarR);
__m512d _fluxm2;
_fluxm2 = _mm512_mul_pd(_uR, _ubarR);
_fluxm2 = _mm512_fmadd_pd(_xn, _pR, _fluxm2);
_fluxm2 = _mm512_mul_pd(_ln, _fluxm2);
__m512d _fluxm3;
_fluxm3 = _mm512_mul_pd(_vR, _ubarR);
_fluxm3 = _mm512_fmadd_pd(_yn, _pR, _fluxm3);
_fluxm3 = _mm512_mul_pd(_ln, _fluxm3);
__m512d _fluxm4;
_fluxm4 = _mm512_mul_pd(_wR, _ubarR);
_fluxm4 = _mm512_fmadd_pd(_zn, _pR, _fluxm4);
_fluxm4 = _mm512_mul_pd(_ln, _fluxm4);
__m512d _res1;
_res1 = _mm512_fnmadd_pd(_ln, _t1, _mm512_add_pd(_fluxm1, _fluxp1));
__m512d _res2;
_res2 = _mm512_fnmadd_pd(_ln, _t2, _mm512_add_pd(_fluxm2, _fluxp2));
__m512d _res3;
_res3 = _mm512_fnmadd_pd(_ln, _t3, _mm512_add_pd(_fluxm3, _fluxp3));
__m512d _res4;
_res4 = _mm512_fnmadd_pd(_ln, _t4, _mm512_add_pd(_fluxm4, _fluxp4));
/* Update the residual */
__m512i _node, _part;
__mmask _next;
_node = _mm512_castsi256_si512(_n0);
_part = _mm512_i32gather_epi32(_node, &part[0], 4);
_next = _mm512_cmpeq_epi32_mask(_part, _t);
/* Conflict detection instructions with multiple node update */
/* Node 0 Contributions */
do {
__m512i _cd, _bnext;
__m512d _v, _d;
__mmask _crt;
_cd = _mm512_mask_conflict_epi32(_ng, _next, _node);
_bnext = _mm512_broadcastmw_epi32(_next);
_crt = _mm512_mask_testn_epi32_mask(_next, _cd, _bnext);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx0, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res1, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx0, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx01, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res2, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx01, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx02, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res3, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx02, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx03, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res4, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx03, _d, 8);
_next = _mm512_kxor(_next, _crt);
} while(_next);
_node = _mm512_castsi256_si512(_n1);
_part = _mm512_i32gather_epi32(_node, &part[0], 4);
_next = _mm512_cmpeq_epi32_mask(_part, _t);
/* Node 1 Contributions */
do {
__m512i _cd, _bnext;
__m512d _v, _d;
__mmask _crt;
_cd = _mm512_mask_conflict_epi32(_ng, _next, _node);
_bnext = _mm512_broadcastmw_epi32(_next);
_crt = _mm512_mask_testn_epi32_mask(_next, _cd, _bnext);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx1, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res1, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx1, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx11, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res2, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx11, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx12, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res3, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx12, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx13, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res4, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx13, _d, 8);
_next = _mm512_kxor(_next, _crt);
} while(_next);
}
/* Remainder loop */
for(i = lim; i < ie1; i++)
{
uint32_t node0 = n0[i];
uint32_t node1 = n1[i];
double xn = x0[i];
double yn = x1[i];
double zn = x2[i];
double ln = x3[i];
double xmean = 0.5f * (xyz0[node0] + xyz0[node1]);
double ymean = 0.5f * (xyz1[node0] + xyz1[node1]);
double zmean = 0.5f * (xyz2[node0] + xyz2[node1]);
/*
Now lets get our other 2 vectors
For first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} is close to unity, use {0,1,0}
*/
double X1, Y1, Z1;
double dot = xn;
if(fabs(dot) < 0.95f)
{
X1 = 1.f - dot * xn;
Y1 = -dot * yn;
Z1 = -dot * zn;
}
else
{
dot = yn;
X1 = -dot * xn;
Y1 = 1.f - dot * yn;
Z1 = -dot * zn;
}
/*
Normalize the first vector
*/
double size = X1 * X1;
size += Y1 * Y1;
size += Z1 * Z1;
size = sqrt(size);
X1 /= size;
Y1 /= size;
Z1 /= size;
/*
Take cross-product of normal and V1 to get V2
*/
double X2 = yn * Z1;
X2 -= zn * Y1;
double Y2 = zn * X1;
Y2 -= xn * Z1;
double Z2 = xn * Y1;
Z2 -= yn * X1;
/*
Get variables on "left" and "right" side of face
*/
double rx = xmean - xyz0[node0];
double ry = ymean - xyz1[node0];
double rz = zmean - xyz2[node0];
uint32_t idx0 = (unsigned int) bsz * node0;
uint32_t idx1 = (unsigned int) bsz * node1;
// P
double pL = q[idx0 + 0] + gradx0[idx0 + 0] * rx;
pL += gradx1[idx0 + 0] * ry;
pL += gradx2[idx0 + 0] * rz;
// Velocity u
double uL = q[idx0 + 1] + gradx0[idx0 + 1] * rx;
uL += gradx1[idx0 + 1] * ry;
uL += gradx2[idx0 + 1] * rz;
// Velocity v
double vL = q[idx0 + 2] + gradx0[idx0 + 2] * rx;
vL += gradx1[idx0 + 2] * ry;
vL += gradx2[idx0 + 2] * rz;
// Velocity w
double wL = q[idx0 + 3] + gradx0[idx0 + 3] * rx;
wL += gradx1[idx0 + 3] * ry;
wL += gradx2[idx0 + 3] * rz;
double ubarL = xn * uL;
ubarL += yn * vL;
ubarL += zn * wL;
rx = xmean - xyz0[node1];
ry = ymean - xyz1[node1];
rz = zmean - xyz2[node1];
// P
double pR = q[idx1 + 0] + gradx0[idx1 + 0] * rx;
pR += gradx1[idx1 + 0] * ry;
pR += gradx2[idx1 + 0] * rz;
// Velocity u
double uR = q[idx1 + 1] + gradx0[idx1 + 1] * rx;
uR += gradx1[idx1 + 1] * ry;
uR += gradx2[idx1 + 1] * rz;
// Velocity v
double vR = q[idx1 + 2] + gradx0[idx1 + 2] * rx;
vR += gradx1[idx1 + 2] * ry;
vR += gradx2[idx1 + 2] * rz;
// Velocity w
double wR = q[idx1 + 3] + gradx0[idx1 + 3] * rx;
wR += gradx1[idx1 + 3] * ry;
wR += gradx2[idx1 + 3] * rz;
double ubarR = xn * uR;
ubarR += yn * vR;
ubarR += zn * wR;
/* Compute averages */
//double p = 0.5f * (pL + pR);
double u = 0.5f * (uL + uR);
double v = 0.5f * (vL + vR);
double w = 0.5f * (wL + wR);
double ubar = xn * u;
ubar += yn * v;
ubar += zn * w;
double phi1 = xn * B;
phi1 += u * ubar;
double phi2 = yn * B;
phi2 += v * ubar;
double phi3 = zn * B;
phi3 += w * ubar;
double phi4 = Y2 * phi3;
phi4 -= Z2 * phi2;
double phi5 = Z2 * phi1;
phi5 -= X2 * phi3;
double phi6 = X2 * phi2;
phi6 -= Y2 * phi1;
double phi7 = Z1 * phi2;
phi7 -= Y1 * phi3;
double phi8 = X1 * phi3;
phi8 -= Z1 * phi1;
double phi9 = Y1 * phi1;
phi9 -= X1 * phi2;
double c2 = ubar * ubar + B;
double c = sqrt(c2);
/*
Now compute eigenvalues, eigenvectors, and strengths
*/
double eig1 = fabs(ubar);
double eig2 = fabs(ubar);
double eig3 = fabs(ubar + c);
double eig4 = fabs(ubar - c);
double dp = pR - pL;
double du = uR - uL;
double dv = vR - vL;
double dw = wR - wL;
/*
Components of T(inverse)
*/
double ti11 = u * phi4;
ti11 += v * phi5;
ti11 += w * phi6;
ti11 = -ti11 / B;
double ti21 = u * phi7;
ti21 += v * phi8;
ti21 += w * phi9;
ti21 = -ti21 / B;
double ti31 = 0.5f * (c - ubar);
ti31 /= B;
double ti41 = -0.5f * (c + ubar);
ti41 /= B;
/*
jumps (T(inverse) * dq)
*/
double dv1 = ti11 * dp;
dv1 += phi4 * du;
dv1 += phi5 * dv;
dv1 += phi6 * dw;
dv1 /= c2;
double dv2 = ti21 * dp;
dv2 += phi7 * du;
dv2 += phi8 * dv;
dv2 += phi9 * dw;
dv2 /= c2;
double dv3 = 2.f * ti31 * dp;
dv3 += xn * du;
dv3 += yn * dv;
dv3 += zn * dw;
dv3 *= 0.5f / c2;
double dv4 = 2.f * ti41 * dp;
dv4 += xn * du;
dv4 += yn * dv;
dv4 += zn * dw;
dv4 *= 0.5f / c2;
/*
Now get elements of T
*/
double r13 = c * B;
double r23 = u * (ubar + c);
r23 += xn * B;
double r33 = v * (ubar + c);
r33 += yn * B;
double r43 = w * (ubar + c);
r43 += zn * B;
double r14 = -c * B;
double r24 = u * (ubar - c);
r24 += xn * B;
double r34 = v * (ubar - c);
r34 += yn * B;
double r44 = w * (ubar - c);
r44 += zn * B;
/*
Calculate T* |lambda| * T(inverse)
*/
double t1 = eig3 * r13 * dv3 + eig4 * r14 * dv4;
double t2 = eig1 * X1 * dv1 + eig2 * X2 * dv2;
t2 += eig3 * r23 * dv3 + eig4 * r24 * dv4;
double t3 = eig1 * Y1 * dv1 + eig2 * Y2 * dv2;
t3 += eig3 * r33 * dv3 + eig4 * r34 * dv4;
double t4 = eig1 * Z1 * dv1 + eig2 * Z2 * dv2;
t4 += eig3 * r43 * dv3 + eig4 * r44 * dv4;
/*
Modify to calculate .5(fl +fr) from nodes
instead of extrapolated ones
*/
double fluxp1 = ln * B * ubarL;
double fluxp2 = ln * (uL * ubarL + xn * pL);
double fluxp3 = ln * (vL * ubarL + yn * pL);
double fluxp4 = ln * (wL * ubarL + zn * pL);
/*
Now the right side
*/
double fluxm1 = ln * B * ubarR;
double fluxm2 = ln * (uR * ubarR + xn * pR);
double fluxm3 = ln * (vR * ubarR + yn * pR);
double fluxm4 = ln * (wR * ubarR + zn * pR);
double res1 = 0.5f * (fluxp1 + fluxm1 - ln * t1);
double res2 = 0.5f * (fluxp2 + fluxm2 - ln * t2);
double res3 = 0.5f * (fluxp3 + fluxm3 - ln * t3);
double res4 = 0.5f * (fluxp4 + fluxm4 - ln * t4);
if(part[node0] == t)
{
r[idx0 + 0] = r[idx0 + 0] + res1;
r[idx0 + 1] = r[idx0 + 1] + res2;
r[idx0 + 2] = r[idx0 + 2] + res3;
r[idx0 + 3] = r[idx0 + 3] + res4;
}
if(part[node1] == t)
{
r[idx1 + 0] = r[idx1 + 0] - res1;
r[idx1 + 1] = r[idx1 + 1] - res2;
r[idx1 + 2] = r[idx1 + 2] - res3;
r[idx1 + 3] = r[idx1 + 3] - res4;
}
}
}
uint32_t i;
for(i = 0; i < snfc; i++)
{
uint32_t if0 = snfic[i];
uint32_t if1 = snfic[i+1];
uint32_t j;
#pragma omp parallel for
for(j = if0; j < if1; j++)
{
uint32_t node0 = sn0[j];
uint32_t node1 = sn1[j];
uint32_t node2 = sn2[j];
double p1 = q[bsz * node0];
double p2 = q[bsz * node1];
double p3 = q[bsz * node2];
double ax = xyz0[node1] - xyz0[node0];
double ay = xyz1[node1] - xyz1[node0];
double az = xyz2[node1] - xyz2[node0];
double bx = xyz0[node2] - xyz0[node0];
double by = xyz1[node2] - xyz1[node0];
double bz = xyz2[node2] - xyz2[node0];
/*
Normal points away from grid interior.
Magnitude is 1/3 area of surface triangle.
*/
double xn = ay * bz;
xn -= az * by;
xn *= MAG1;
double yn = ax * bz;
yn -= az * bx;
yn *= MAG0;
double zn = ax * by;
zn -= ay * bx;
zn *= MAG1;
double pa = 0.125f * (p2 + p3);
pa += 0.75f * p1;
double pb = 0.125f * (p3 + p1);
pb += 0.75f * p2;
double pc = 0.125f * (p1 + p2);
pc += 0.75f * p3;
uint32_t idx;
idx = bsz * node0;
r[idx + 1] += xn * pa;
r[idx + 2] += yn * pa;
r[idx + 3] += zn * pa;
idx = bsz * node1;
r[idx + 1] += xn * pb;
r[idx + 2] += yn * pb;
r[idx + 3] += zn * pb;
idx = bsz * node2;
r[idx + 1] += xn * pc;
r[idx + 2] += yn * pc;
r[idx + 3] += zn * pc;
}
}
/* Do the free boundaries */
#pragma omp parallel for
for(i = 0; i < nfnodes; i++)
{
uint32_t n = nfptr[i];
/*
Get normal and "other" 2 vectors. Remember that fxn,fyn and fzn
has the magnitude of the face contained in it.
*/
double xn = f_xyz0[i];
double yn = f_xyz1[i];
double zn = f_xyz2[i];
double area = xn * xn;
area += yn * yn;
area += zn * zn;
area = sqrt(area);
xn /= area;
yn /= area;
zn /= area;
/*
Now lets get our other 2 vectors
For first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} is close to unity, use {0,1,0}
*/
double X1, Y1, Z1;
double dot = xn;
if(fabs(dot) < 0.95f)
{
X1 = 1.f - dot * xn;
Y1 = -dot * yn;
Z1 = -dot * zn;
}
else
{
dot = yn;
X1 = -dot * xn;
Y1 = 1.f - dot * yn;
Z1 = -dot * zn;
}
/*
Normalize the first vector (V1)
*/
double size = X1 * X1;
size += Y1 * Y1;
size += Z1 * Z1;
size = sqrt(size);
X1 /= size;
Y1 /= size;
Z1 /= size;
/*
Take cross-product of normal with V1 to get V2
*/
double X2 = yn * Z1;
X2 -= zn * Y1;
double Y2 = zn * X1;
Y2 -= xn * Z1;
double Z2 = xn * Y1;
Z2 -= yn * X1;
/*
Calculate elements of T and T(inverse) evaluated at free-stream
*/
double ubar0 = xn * U;
ubar0 += yn * V;
ubar0 += zn * W;
double c20 = ubar0 * ubar0 + B;
double c0 = sqrt(c20);
double phi1 = xn * B;
phi1 += U * ubar0;
double phi2 = yn * B;
phi2 += V * ubar0;
double phi3 = zn * B;
phi3 += W * ubar0;
double phi4 = Y2 * phi3;
phi4 -= Z2 * phi2;
double phi5 = Z2 * phi1;
phi5 -= X2 * phi3;
double phi6 = X2 * phi2;
phi6 -= Y2 * phi1;
double phi7 = Z1 * phi2;
phi7 -= Y1 * phi3;
double phi8 = X1 * phi3;
phi8 -= Z1 * phi1;
double phi9 = Y1 * phi1;
phi9 -= X1 * phi2;
double t13 = c0 * B;
double t23 = U * (ubar0 + c0);
t23 += xn * B;
double t33 = V * (ubar0 + c0);
t33 += yn * B;
double t43 = W * (ubar0 + c0);
t43 += zn * B;
double t14 = -c0 * B;
double t24 = U * (ubar0 - c0);
t24 += xn * B;
double t34 = V * (ubar0 - c0);
t34 += yn * B;
double t44 = W * (ubar0 - c0);
t44 += zn * B;
double ti11 = U * phi4;
ti11 += V * phi5;
ti11 += W * phi6;
ti11 = -ti11/B;
double ti21 = U * phi7;
ti21 += V * phi8;
ti21 += W * phi9;
ti21 = -ti21/B;
double ti31 = 0.5f * (c0 - ubar0);
ti31 /= B;
double ti41 = -0.5f * (c0 + ubar0);
ti41 /= B;
/*
Now, get the variables on the "inside"
*/
double pi = q[bsz * n + 0];
double ui = q[bsz * n + 1];
double vi = q[bsz * n + 2];
double wi = q[bsz * n + 3];
double un = xn * ui;
un += yn * vi;
un += zn * wi;
/*
If ubar is negative, take the reference condition from outside
*/
double pr, ur, vr, wr;
if(un > 0.f)
{
pr = pi;
ur = ui;
vr = vi;
wr = wi;
}
else
{
pr = P;
ur = U;
vr = V;
wr = W;
}
/*
Set rhs
*/
double rhs1 = ti11 * pr;
rhs1 += phi4 * ur;
rhs1 += phi5 * vr;
rhs1 += phi6 * wr;
rhs1 /= c20;
double rhs2 = ti21 * pr;
rhs2 += phi7 * ur;
rhs2 += phi8 * vr;
rhs2 += phi9 * wr;
rhs2 /= c20;
double rhs3 = 2.f * ti31 * pi;
rhs3 += xn * ui;
rhs3 += yn * vi;
rhs3 += zn * wi;
rhs3 = 0.5f * rhs3 / c20;
double rhs4 = 2.f * ti41 * P;
rhs4 += xn * U;
rhs4 += yn * V;
rhs4 += zn * W;
rhs4 = 0.5f * rhs4 / c20;
/*
Now do matrix multiplication to get values on boundary
*/
double pb = t13 * rhs3;
pb += t14 * rhs4;
double ub = X1 * rhs1;
ub += X2 * rhs2;
ub += t23 * rhs3;
ub += t24 * rhs4;
double vb = Y1 * rhs1;
vb += Y2 * rhs2;
vb += t33 * rhs3;
vb += t34 * rhs4;
double wb = Z1 * rhs1;
wb += Z2 * rhs2;
wb += t43 * rhs3;
wb += t44 * rhs4;
double ubar = xn * ub;
ubar += yn * vb;
ubar += zn * wb;
uint32_t idx = (unsigned int) bsz * n;
r[idx + 0] += area * B * ubar;
r[idx + 1] += area * (ub * ubar + xn * pb);
r[idx + 2] += area * (vb * ubar + yn * pb);
r[idx + 3] += area * (wb * ubar + zn * pb);
}
}
/*
 * ComputeFlux: thin timed wrapper that unpacks the geometry container
 * and invokes the flux kernel _KRN_ComputeFlux on flat arrays.
 *
 *  g    - geometry/mesh container (faces, edges, nodes, boundary data)
 *  q    - solution vector, read by the kernel
 *  grad - nodal gradient components x0/x1/x2, passed to the kernel
 *  r    - residual vector the kernel accumulates into
 *
 * NOTE(review): the argument list is positional and must match the
 * kernel's parameter order exactly -- do not reorder these fields.
 * Elapsed time is logged under the KERNEL_FLUX tag via
 * rdbench()/fun3d_log().
 */
void
ComputeFlux(const GEOMETRY *g, const double *q, GRADIENT *grad, double *r)
{
    BENCH start_bench = rdbench();
    _KRN_ComputeFlux(
        g->b->f->sz,            /* boundary face count -- TODO confirm */
        g->c->b,
        g->b->f->nptr,
        g->b->f->xyz->x0,
        g->b->f->xyz->x1,
        g->b->f->xyz->x2,
        g->s->i,
        g->n->part,             /* node -> partition map (used for ownership tests) */
        g->e->eptr->n0,         /* edge endpoint node ids */
        g->e->eptr->n1,
        g->e->xyzn->x0,         /* edge normals (x,y,z) + magnitude */
        g->e->xyzn->x1,
        g->e->xyzn->x2,
        g->e->xyzn->x3,
        q,
        g->c->sz,
        g->t->sz,
        g->t->i,
        g->n->xyz->x0,          /* node coordinates */
        g->n->xyz->x1,
        g->n->xyz->x2,
        g->b->fc->fptr->n0,     /* boundary-face triangle node ids */
        g->b->fc->fptr->n1,
        g->b->fc->fptr->n2,
        g->e->w->w0->x0,        /* edge reconstruction weights */
        g->e->w->w0->x1,
        g->e->w->w0->x2,
        g->e->w->w1->x0,
        g->e->w->w1->x1,
        g->e->w->w1->x2,
        grad->x0,
        grad->x1,
        grad->x2,
        r
    );
    fun3d_log(start_bench, KERNEL_FLUX);
}
|
Compute.h | #ifndef COMPUTE_H_INCLUDED
#define COMPUTE_H_INCLUDED
#include <stdio.h>
#include <stdlib.h>
#include <SDL2/SDL.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include <immintrin.h>
#include "Grad.h"
/*
 * Reference-orbit and series-approximation state shared by the render
 * routines below.  BUG FIX: these were external definitions in a header,
 * so every translation unit including Compute.h would emit a duplicate
 * definition and break the link; `static` gives them internal linkage,
 * matching the `inline static` functions that use them.
 */
static __float128 dx[10000], dy[10000], x0, y01; /* high-precision reference orbit and its centre */
static float Ax, Ay, Bx, By, Cx, Cy;             /* series coefficients A, B, C (real/imag parts) */
static float A, B, C, Ai, Bi, Ci;                /* scratch values for the coefficient iteration */
static float dxl[10000], dyl[10000];             /* reference orbit rounded to float for SSE use */
static int apnum;                                /* iterations skipped via series approximation */
static char setpoint = 1;                        /* nonzero: recompute reference orbit on next render */
/*
 * Screenshot: render the current fractal view at `res` times the screen
 * resolution into an off-screen buffer and save it as images/<time>.bmp.
 *
 *  m        zoom factor
 *  ph, pv   horizontal / vertical pan
 *  iter     maximum iteration count
 *  res      supersampling factor relative to WIDTH x HEIGHT
 *  mcx,mcy  unused here; the reference point comes from the globals
 *           x0/y01 filled in by Render()/RenderCol() -- TODO confirm
 *
 * Four pixels of each 3x3 tile are iterated with SSE (4 lanes at once);
 * if all four corners agree, the tile interior is flood-filled,
 * otherwise the remaining edge pixels are computed and the centre is
 * the average of its 8 neighbours.
 */
inline static void Screenshot(__float128 m,__float128 ph,__float128 pv,int iter, int res,__float128 mcx,__float128 mcy)
{
    char file[30];
    int height = HEIGHT * res, width = WIDTH * res;
    unsigned char *pixels = malloc(height * 4 * width);
    unsigned char tcb, tcg, tcr;
    if (pixels == NULL)
        return; /* BUG FIX: original dereferenced an unchecked malloc */
    /* BUG FIX: the orbit tables dx/dy/dxl/dyl hold 10000 entries and the
     * float copies fill indices 0..9998; clamp so the loops below can
     * never index past them. */
    if (iter > 9999)
        iter = 9999;
    __float128 prex = (width * (-0.5) + 1.0 * m * ph * res), prey = (height * (-0.5) - 1.0 * m * pv * res);
    __m128 zx, zy, cx, cy, x, y, four, mask, sum;
    __m128 xy, tx, tx1;
    __m128 k, iterace, one;
    int off, i, j, off1, l, off2, rac, bac, gac; /* unused off3 removed */
    iterace = _mm_set1_ps((float)iter);
    one = _mm_set1_ps(1.0f);
    four = _mm_set1_ps(4.0f);
    __float128 invert = 1.0 / (360 * m * res);
    /* BUG FIX: off1, off2, l, rac, bac and gac were missing from the
     * private() clause, so all threads shared them -- a data race that
     * scrambled pixel offsets and colour accumulators. */
    #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels,dx,dy,x0,y01,Ax,Ay,Bx,By,Cx,Cy) private(off,off1,off2,i,j,l,rac,bac,gac,k,zx,zy,x,y,cy,cx,sum,mask,xy,tx,tx1,tcb,tcg,tcr)
    for(i = 3; i < height - 3; i += 3)
    {
        for(j = 3; j < width - 3; j += 3)
        {
            off  = 4 * width * i + (j << 2);
            off1 = 4 * width * (i + 1) + (j << 2);
            off2 = 4 * width * (i + 2) + (j << 2);
            /* lanes: (j,i) (j+2,i) (j,i+2) (j+2,i+2) */
            x = cx = _mm_setr_ps(((j + prex) * invert - x0), ((j + prex + 2) * invert - x0), ((j + prex) * invert - x0), ((j + prex + 2) * invert - x0));
            y = cy = _mm_setr_ps(((i + prey) * invert - y01), ((i + prey) * invert - y01), ((i + prey + 2) * invert - y01), ((i + prey + 2) * invert - y01));
            k = _mm_setzero_ps();
            l = 0;
            rac = bac = gac = 0;
            if(m > 1e15 && apnum)
            {
                /* deep zoom: skip the first apnum iterations using the
                 * cubic series approximation A*d + B*d^2 + C*d^3 */
                zx = _mm_mul_ps(x, x);
                zy = _mm_mul_ps(y, y);
                sum = _mm_sub_ps(zx, zy);
                xy = x * y;
                tx = Ax * x - Ay * y + Bx * (sum) - 2.0f * By * xy + Cx * x * (zx - 3.0f * zy) + Cy * y * (zy - 3.0f * zx);
                y = Ax * y + Ay * x + 2.0f * Bx * xy + By * (sum) + Cx * y * (3.0f * zx - zy) + Cy * x * (zx - 3.0f * zy);
                x = tx;
                l = apnum;
                k += (float)apnum;
            }
            do
            {
                /* perturbation step relative to the reference orbit */
                tx  = _mm_set1_ps(dxl[l]);
                tx1 = _mm_set1_ps(dyl[l]);
                zx = _mm_mul_ps(x, x);
                zy = _mm_mul_ps(y, y);
                sum = _mm_add_ps(zy, zx);
                xy = 2.0f * (y * (x + tx) + x * tx1);
                x = 2.0f * (tx * x - tx1 * y) + zx - zy + cx;
                y = _mm_add_ps(xy, cy);
                mask = _mm_cmplt_ps(sum, four);
                k = _mm_add_ps(k, _mm_and_ps(one, mask));
            }
            while(++l < iter && _mm_movemask_ps(mask));
            k = _mm_div_ps(k, iterace);
            k *= 8000.0f;
            bac += tcb = pixels[off]     = colb(k[0]);
            gac += tcg = pixels[off + 1] = colg(k[0]);
            rac += tcr = pixels[off + 2] = colr(k[0]);
            bac += pixels[off + 8]  = colb(k[1]);
            gac += pixels[off + 9]  = colg(k[1]);
            rac += pixels[off + 10] = colr(k[1]);
            bac += pixels[off2]     = colb(k[2]);
            gac += pixels[off2 + 1] = colg(k[2]);
            rac += pixels[off2 + 2] = colr(k[2]);
            bac += pixels[off2 + 8]  = colb(k[3]);
            gac += pixels[off2 + 9]  = colg(k[3]);
            rac += pixels[off2 + 10] = colr(k[3]);
            if(tcb == pixels[off + 8] && tcb == pixels[off2] && tcb == pixels[off2 + 8] &&
               tcg == pixels[off + 9] && tcg == pixels[off2 + 1] && tcg == pixels[off2 + 9] &&
               tcr == pixels[off + 10] && tcr == pixels[off2 + 2] && tcr == pixels[off2 + 10])
            {
                /* all four corners identical: flood-fill the tile interior */
                pixels[off + 4] = pixels[off1] = pixels[off1 + 4] = pixels[off1 + 8] = pixels[off2 + 4] = tcb;
                pixels[off + 5] = pixels[off1 + 1] = pixels[off1 + 5] = pixels[off1 + 9] = pixels[off2 + 5] = tcg;
                pixels[off + 6] = pixels[off1 + 2] = pixels[off1 + 6] = pixels[off1 + 10] = pixels[off2 + 6] = tcr;
            }
            else
            {
                /* corners differ: compute the 4 edge pixels as well */
                x = cx = _mm_setr_ps(((j + prex + 1) * invert - x0), ((j + prex) * invert - x0), ((j + prex + 2) * invert - x0), ((j + prex + 1) * invert - x0));
                y = cy = _mm_setr_ps(((i + prey) * invert - y01), ((i + prey + 1) * invert - y01), ((i + prey + 1) * invert - y01), ((i + prey + 2) * invert - y01));
                k = _mm_setzero_ps();
                l = 0;
                if(m > 1e15 && apnum)
                {
                    zx = _mm_mul_ps(x, x);
                    zy = _mm_mul_ps(y, y);
                    sum = _mm_sub_ps(zx, zy);
                    xy = x * y;
                    tx = Ax * x - Ay * y + Bx * (sum) - 2.0f * By * xy + Cx * x * (zx - 3.0f * zy) + Cy * y * (zy - 3.0f * zx);
                    y = Ax * y + Ay * x + 2.0f * Bx * xy + By * (sum) + Cx * y * (3.0f * zx - zy) + Cy * x * (zx - 3.0f * zy);
                    x = tx;
                    l = apnum;
                    k += (float)apnum;
                }
                do
                {
                    tx  = _mm_set1_ps(dxl[l]);
                    tx1 = _mm_set1_ps(dyl[l]);
                    zx = _mm_mul_ps(x, x);
                    zy = _mm_mul_ps(y, y);
                    sum = _mm_add_ps(zy, zx);
                    xy = 2.0f * (y * (x + tx) + x * tx1);
                    x = 2.0f * (tx * x - tx1 * y) + zx - zy + cx;
                    y = _mm_add_ps(xy, cy);
                    mask = _mm_cmplt_ps(sum, four);
                    k = _mm_add_ps(k, _mm_and_ps(one, mask));
                }
                while(++l < iter && _mm_movemask_ps(mask));
                k = _mm_div_ps(k, iterace);
                k *= 8000.0f;
                bac += pixels[off + 4] = colb(k[0]);
                gac += pixels[off + 5] = colg(k[0]);
                rac += pixels[off + 6] = colr(k[0]);
                bac += pixels[off1]     = colb(k[1]);
                gac += pixels[off1 + 1] = colg(k[1]);
                rac += pixels[off1 + 2] = colr(k[1]);
                bac += pixels[off1 + 8]  = colb(k[2]);
                gac += pixels[off1 + 9]  = colg(k[2]);
                rac += pixels[off1 + 10] = colr(k[2]);
                bac += pixels[off2 + 4] = colb(k[3]);
                gac += pixels[off2 + 5] = colg(k[3]);
                rac += pixels[off2 + 6] = colr(k[3]);
                /* centre pixel = average of the 8 surrounding samples */
                pixels[off1 + 4] = bac >> 3;
                pixels[off1 + 5] = gac >> 3;
                pixels[off1 + 6] = rac >> 3;
            }
        }
    }
    SDL_Surface *surf = SDL_CreateRGBSurfaceFrom(pixels, width, height, 8 * 4, width * 4, 0, 0, 0, 0);
    /* BUG FIX: sprintf_s is MSVC-only, and %d does not match time_t;
     * use standard snprintf with an explicit cast. */
    snprintf(file, sizeof file, "images/%ld.bmp", (long)time(NULL));
    SDL_SaveBMP(surf, file);
    SDL_FreeSurface(surf);
    free(pixels);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
 * Render: draw the fractal into `pixels` (WIDTH x HEIGHT, 4 bytes/px).
 *
 *  m, ph, pv      zoom and pan
 *  iter           maximum iteration count
 *  mcx, mcy       high-precision reference point (centre)
 *  index/index2/index3  interleaving parameters that stride the 3x3
 *                 tiles across cooperating calls -- TODO confirm intent
 *
 * When `setpoint` is set, the high-precision reference orbit and the
 * series-approximation coefficients are recomputed first.  Rendering
 * then proceeds per 3x3 tile: 4 corner pixels via SSE, flood-fill if
 * they agree, else the remaining pixels plus an averaged centre.
 */
inline static void Render(unsigned char *pixels,__float128 m,__float128 ph,__float128 pv,int iter,__float128 mcx,
                          __float128 mcy,char index,char index2,char index3)
{
    __float128 prex = (m * ph - HWIDTH), prey = (-HHEIGHT - m * pv);
    __float128 px, py;
    __m128 zx, zy, cx, cy, x, y, four, mask, sum;
    __m128 xy, tx, tx1;
    __m128 k, iterace, one;
    /* BUG FIX: dx/dy/dxl/dyl hold 10000 entries and dxl/dyl are only
     * filled for indices 0..9998; clamp iter so dxl[f]/dxl[l] below can
     * never read out of bounds. */
    if (iter > 9999)
        iter = 9999;
    iterace = _mm_set1_ps((float)iter);
    one = _mm_set1_ps(1.0f);
    four = _mm_set1_ps(4.0f);
    __float128 invert = (1.0 / (360.0 * m));
    int off, i, j, off1, l, f, off2, rac, bac, gac; /* unused off3 removed */
    unsigned char tcb, tcr, tcg;
    if(setpoint)
    {
        /* (re)build the high-precision reference orbit for (mcx,mcy),
         * its float copy, and the series coefficients A, B, C */
        dx[0] = x0 = mcx;
        dy[0] = y01 = mcy;
        for(f = 0; f < 9999; f++)
        {
            px = dx[f] * dx[f];
            py = dy[f] * dy[f];
            dy[f + 1] = 2.0 * dx[f] * dy[f] + y01;
            dx[f + 1] = px - py + x0;
        }
        #pragma omp parallel for simd
        for(i = 0; i < 9999; i++)
        {
            dxl[i] = dx[i];
            dyl[i] = dy[i];
        }
        Ax = 1.0f;
        Ay = Bx = By = Cx = Cy = 0.0f;
        apnum = 0;
        for(f = 0; f < iter; f++)
        {
            C  = 2.0f * (dxl[f] * Cx - dyl[f] * Cy + Ax * Bx - Ay * By);
            Ci = 2.0f * (dyl[f] * Cx + dxl[f] * Cy + Ay * Bx + Ax * By);
            B  = 2.0f * (dxl[f] * Bx - dyl[f] * By) + Ax * Ax - Ay * Ay;
            Bi = 2.0f * (dyl[f] * Bx + dxl[f] * By + Ax * Ay);
            A  = 2.0f * (dxl[f] * Ax - dyl[f] * Ay) + 1.0f;
            Ai = 2.0f * (dyl[f] * Ax + dxl[f] * Ay);
            /* stop once a coefficient overflows to +/-inf (a finite
             * float can never exceed 2e200) */
            if(A > 2e200 || Ai > 2e200 || B > 2e200 || Bi > 2e200 || C > 2e200 || Ci > 2e200) break;
            if(A < -2e200 || Ai < -2e200 || B < -2e200 || Bi < -2e200 || C < -2e200 || Ci < -2e200) break;
            Cx = C;
            Cy = Ci;
            Bx = B;
            By = Bi;
            Ax = A;
            Ay = Ai;
        }
        apnum = f;
        printf("A %e %e B %e %e C %e %e Skipped: %d/%d\n", Ax, Ay, Bx, By, Cx, Cy, apnum, iter);
        setpoint = 0;
    }
    /* BUG FIX: off1, off2, l, rac, bac and gac were missing from the
     * private() clause, so all threads shared them -- a data race that
     * scrambled pixel offsets and colour accumulators. */
    #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels,dx,dy,x0,y01,Ax,Ay,Bx,By,Cx,Cy) private(off,off1,off2,i,j,l,rac,bac,gac,k,zx,zy,x,y,cy,cx,sum,mask,xy,tx,tx1,tcb,tcg,tcr)
    for(i = 3 * index * index2 + 3; i < HEIGHT - 3; i += 3 * (1 + index2))
    {
        for(j = (index * index2 + index3) * 3; j < WIDTH - 3; j += 3 + index2 * 3)
        {
            if(j < 3) continue;
            off  = 4 * WIDTH * i + (j << 2);
            off1 = 4 * WIDTH * (i + 1) + (j << 2);
            off2 = 4 * WIDTH * (i + 2) + (j << 2);
            /* lanes: (j,i) (j+2,i) (j,i+2) (j+2,i+2) */
            x = cx = _mm_setr_ps(((j + prex) * invert - x0), ((j + prex + 2) * invert - x0), ((j + prex) * invert - x0), ((j + prex + 2) * invert - x0));
            y = cy = _mm_setr_ps(((i + prey) * invert - y01), ((i + prey) * invert - y01), ((i + prey + 2) * invert - y01), ((i + prey + 2) * invert - y01));
            k = _mm_setzero_ps();
            l = 0;
            rac = bac = gac = 0;
            if(m > 1e15 && apnum)
            {
                /* deep zoom: skip apnum iterations via the cubic series */
                zx = _mm_mul_ps(x, x);
                zy = _mm_mul_ps(y, y);
                sum = _mm_sub_ps(zx, zy);
                xy = x * y;
                tx = Ax * x - Ay * y + Bx * (sum) - 2.0f * By * xy + Cx * x * (zx - 3.0f * zy) + Cy * y * (zy - 3.0f * zx);
                y = Ax * y + Ay * x + 2.0f * Bx * xy + By * (sum) + Cx * y * (3.0f * zx - zy) + Cy * x * (zx - 3.0f * zy);
                x = tx;
                l = apnum;
                k += (float)apnum;
            }
            do
            {
                /* perturbation step relative to the reference orbit */
                tx  = _mm_set1_ps(dxl[l]);
                tx1 = _mm_set1_ps(dyl[l]);
                zx = _mm_mul_ps(x, x);
                zy = _mm_mul_ps(y, y);
                sum = _mm_add_ps(zy, zx);
                xy = 2.0f * (y * (x + tx) + x * tx1);
                x = 2.0f * (tx * x - tx1 * y) + zx - zy + cx;
                y = _mm_add_ps(xy, cy);
                mask = _mm_cmplt_ps(sum, four);
                k = _mm_add_ps(k, _mm_and_ps(one, mask));
            }
            while(++l < iter && _mm_movemask_ps(mask));
            k = _mm_div_ps(k, iterace);
            k *= 8000.0f;
            bac += tcb = pixels[off]     = colb(k[0]);
            gac += tcg = pixels[off + 1] = colg(k[0]);
            rac += tcr = pixels[off + 2] = colr(k[0]);
            bac += pixels[off + 8]  = colb(k[1]);
            gac += pixels[off + 9]  = colg(k[1]);
            rac += pixels[off + 10] = colr(k[1]);
            bac += pixels[off2]     = colb(k[2]);
            gac += pixels[off2 + 1] = colg(k[2]);
            rac += pixels[off2 + 2] = colr(k[2]);
            bac += pixels[off2 + 8]  = colb(k[3]);
            gac += pixels[off2 + 9]  = colg(k[3]);
            rac += pixels[off2 + 10] = colr(k[3]);
            if(tcb == pixels[off + 8] && tcb == pixels[off2] && tcb == pixels[off2 + 8] &&
               tcg == pixels[off + 9] && tcg == pixels[off2 + 1] && tcg == pixels[off2 + 9] &&
               tcr == pixels[off + 10] && tcr == pixels[off2 + 2] && tcr == pixels[off2 + 10])
            {
                /* all four corners identical: flood-fill the tile interior */
                pixels[off + 4] = pixels[off1] = pixels[off1 + 4] = pixels[off1 + 8] = pixels[off2 + 4] = tcb;
                pixels[off + 5] = pixels[off1 + 1] = pixels[off1 + 5] = pixels[off1 + 9] = pixels[off2 + 5] = tcg;
                pixels[off + 6] = pixels[off1 + 2] = pixels[off1 + 6] = pixels[off1 + 10] = pixels[off2 + 6] = tcr;
            }
            else
            {
                /* corners differ: compute the 4 edge pixels as well */
                x = cx = _mm_setr_ps(((j + prex + 1) * invert - x0), ((j + prex) * invert - x0), ((j + prex + 2) * invert - x0), ((j + prex + 1) * invert - x0));
                y = cy = _mm_setr_ps(((i + prey) * invert - y01), ((i + prey + 1) * invert - y01), ((i + prey + 1) * invert - y01), ((i + prey + 2) * invert - y01));
                k = _mm_setzero_ps();
                l = 0;
                if(m > 1e15 && apnum)
                {
                    zx = _mm_mul_ps(x, x);
                    zy = _mm_mul_ps(y, y);
                    sum = _mm_sub_ps(zx, zy);
                    xy = x * y;
                    tx = Ax * x - Ay * y + Bx * (sum) - 2.0f * By * xy + Cx * x * (zx - 3.0f * zy) + Cy * y * (zy - 3.0f * zx);
                    y = Ax * y + Ay * x + 2.0f * Bx * xy + By * (sum) + Cx * y * (3.0f * zx - zy) + Cy * x * (zx - 3.0f * zy);
                    x = tx;
                    l = apnum;
                    k += (float)apnum;
                }
                do
                {
                    tx  = _mm_set1_ps(dxl[l]);
                    tx1 = _mm_set1_ps(dyl[l]);
                    zx = _mm_mul_ps(x, x);
                    zy = _mm_mul_ps(y, y);
                    sum = _mm_add_ps(zy, zx);
                    xy = 2.0f * (y * (x + tx) + x * tx1);
                    x = 2.0f * (tx * x - tx1 * y) + zx - zy + cx;
                    y = _mm_add_ps(xy, cy);
                    mask = _mm_cmplt_ps(sum, four);
                    k = _mm_add_ps(k, _mm_and_ps(one, mask));
                }
                while(++l < iter && _mm_movemask_ps(mask));
                k = _mm_div_ps(k, iterace);
                k *= 8000.0f;
                bac += pixels[off + 4] = colb(k[0]);
                gac += pixels[off + 5] = colg(k[0]);
                rac += pixels[off + 6] = colr(k[0]);
                bac += pixels[off1]     = colb(k[1]);
                gac += pixels[off1 + 1] = colg(k[1]);
                rac += pixels[off1 + 2] = colr(k[1]);
                bac += pixels[off1 + 8]  = colb(k[2]);
                gac += pixels[off1 + 9]  = colg(k[2]);
                rac += pixels[off1 + 10] = colr(k[2]);
                bac += pixels[off2 + 4] = colb(k[3]);
                gac += pixels[off2 + 5] = colg(k[3]);
                rac += pixels[off2 + 6] = colr(k[3]);
                /* centre pixel = average of the 8 surrounding samples */
                pixels[off1 + 4] = bac >> 3;
                pixels[off1 + 5] = gac >> 3;
                pixels[off1 + 6] = rac >> 3;
            }
        }
    }
}
/*
 * RenderCol: identical to Render() except that the horizontal sweep is
 * restricted to the right 3/5 of the window (j >= 2*WIDTH/5), so only
 * that column band is redrawn.  See Render() for parameter meanings.
 */
inline static void RenderCol(unsigned char *pixels,__float128 m,__float128 ph,__float128 pv,int iter,__float128 mcx,
                             __float128 mcy,char index,char index2,char index3)
{
    __float128 prex = (m * ph - HWIDTH), prey = (-HHEIGHT - m * pv);
    __float128 px, py;
    __m128 zx, zy, cx, cy, x, y, four, mask, sum;
    __m128 xy, tx, tx1;
    __m128 k, iterace, one;
    /* BUG FIX: dx/dy/dxl/dyl hold 10000 entries and dxl/dyl are only
     * filled for indices 0..9998; clamp iter so dxl[f]/dxl[l] below can
     * never read out of bounds. */
    if (iter > 9999)
        iter = 9999;
    iterace = _mm_set1_ps((float)iter);
    one = _mm_set1_ps(1.0f);
    four = _mm_set1_ps(4.0f);
    __float128 invert = (1.0 / (360.0 * m));
    int off, i, j, off1, l, f, off2, rac, bac, gac; /* unused off3 removed */
    unsigned char tcb, tcr, tcg;
    if(setpoint)
    {
        /* (re)build the high-precision reference orbit for (mcx,mcy),
         * its float copy, and the series coefficients A, B, C */
        dx[0] = x0 = mcx;
        dy[0] = y01 = mcy;
        for(f = 0; f < 9999; f++)
        {
            px = dx[f] * dx[f];
            py = dy[f] * dy[f];
            dy[f + 1] = 2.0 * dx[f] * dy[f] + y01;
            dx[f + 1] = px - py + x0;
        }
        #pragma omp parallel for simd
        for(i = 0; i < 9999; i++)
        {
            dxl[i] = dx[i];
            dyl[i] = dy[i];
        }
        Ax = 1.0f;
        Ay = Bx = By = Cx = Cy = 0.0f;
        apnum = 0;
        for(f = 0; f < iter; f++)
        {
            C  = 2.0f * (dxl[f] * Cx - dyl[f] * Cy + Ax * Bx - Ay * By);
            Ci = 2.0f * (dyl[f] * Cx + dxl[f] * Cy + Ay * Bx + Ax * By);
            B  = 2.0f * (dxl[f] * Bx - dyl[f] * By) + Ax * Ax - Ay * Ay;
            Bi = 2.0f * (dyl[f] * Bx + dxl[f] * By + Ax * Ay);
            A  = 2.0f * (dxl[f] * Ax - dyl[f] * Ay) + 1.0f;
            Ai = 2.0f * (dyl[f] * Ax + dxl[f] * Ay);
            /* stop once a coefficient overflows to +/-inf (a finite
             * float can never exceed 2e200) */
            if(A > 2e200 || Ai > 2e200 || B > 2e200 || Bi > 2e200 || C > 2e200 || Ci > 2e200) break;
            if(A < -2e200 || Ai < -2e200 || B < -2e200 || Bi < -2e200 || C < -2e200 || Ci < -2e200) break;
            Cx = C;
            Cy = Ci;
            Bx = B;
            By = Bi;
            Ax = A;
            Ay = Ai;
        }
        apnum = f;
        printf("A %e %e B %e %e C %e %e Skipped: %d/%d\n", Ax, Ay, Bx, By, Cx, Cy, apnum, iter);
        setpoint = 0;
    }
    /* BUG FIX: off1, off2, l, rac, bac and gac were missing from the
     * private() clause, so all threads shared them -- a data race that
     * scrambled pixel offsets and colour accumulators. */
    #pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels,dx,dy,x0,y01,Ax,Ay,Bx,By,Cx,Cy) private(off,off1,off2,i,j,l,rac,bac,gac,k,zx,zy,x,y,cy,cx,sum,mask,xy,tx,tx1,tcb,tcg,tcr)
    for(i = 3 * index * index2 + 3; i < HEIGHT - 3; i += 3 * (1 + index2))
    {
        for(j = (index * index2 + index3) * 3 + 2 * WIDTH / 5 - 3; j < WIDTH - 3; j += 3 + index2 * 3)
        {
            if(j < 2 * WIDTH / 5) continue;
            off  = 4 * WIDTH * i + (j << 2);
            off1 = 4 * WIDTH * (i + 1) + (j << 2);
            off2 = 4 * WIDTH * (i + 2) + (j << 2);
            /* lanes: (j,i) (j+2,i) (j,i+2) (j+2,i+2) */
            x = cx = _mm_setr_ps(((j + prex) * invert - x0), ((j + prex + 2) * invert - x0), ((j + prex) * invert - x0), ((j + prex + 2) * invert - x0));
            y = cy = _mm_setr_ps(((i + prey) * invert - y01), ((i + prey) * invert - y01), ((i + prey + 2) * invert - y01), ((i + prey + 2) * invert - y01));
            k = _mm_setzero_ps();
            l = 0;
            rac = bac = gac = 0;
            if(m > 1e15 && apnum)
            {
                /* deep zoom: skip apnum iterations via the cubic series */
                zx = _mm_mul_ps(x, x);
                zy = _mm_mul_ps(y, y);
                sum = _mm_sub_ps(zx, zy);
                xy = x * y;
                tx = Ax * x - Ay * y + Bx * (sum) - 2.0f * By * xy + Cx * x * (zx - 3.0f * zy) + Cy * y * (zy - 3.0f * zx);
                y = Ax * y + Ay * x + 2.0f * Bx * xy + By * (sum) + Cx * y * (3.0f * zx - zy) + Cy * x * (zx - 3.0f * zy);
                x = tx;
                l = apnum;
                k += (float)apnum;
            }
            do
            {
                /* perturbation step relative to the reference orbit */
                tx  = _mm_set1_ps(dxl[l]);
                tx1 = _mm_set1_ps(dyl[l]);
                zx = _mm_mul_ps(x, x);
                zy = _mm_mul_ps(y, y);
                sum = _mm_add_ps(zy, zx);
                xy = 2.0f * (y * (x + tx) + x * tx1);
                x = 2.0f * (tx * x - tx1 * y) + zx - zy + cx;
                y = _mm_add_ps(xy, cy);
                mask = _mm_cmplt_ps(sum, four);
                k = _mm_add_ps(k, _mm_and_ps(one, mask));
            }
            while(++l < iter && _mm_movemask_ps(mask));
            k = _mm_div_ps(k, iterace);
            k *= 8000.0f;
            bac += tcb = pixels[off]     = colb(k[0]);
            gac += tcg = pixels[off + 1] = colg(k[0]);
            rac += tcr = pixels[off + 2] = colr(k[0]);
            bac += pixels[off + 8]  = colb(k[1]);
            gac += pixels[off + 9]  = colg(k[1]);
            rac += pixels[off + 10] = colr(k[1]);
            bac += pixels[off2]     = colb(k[2]);
            gac += pixels[off2 + 1] = colg(k[2]);
            rac += pixels[off2 + 2] = colr(k[2]);
            bac += pixels[off2 + 8]  = colb(k[3]);
            gac += pixels[off2 + 9]  = colg(k[3]);
            rac += pixels[off2 + 10] = colr(k[3]);
            if(tcb == pixels[off + 8] && tcb == pixels[off2] && tcb == pixels[off2 + 8] &&
               tcg == pixels[off + 9] && tcg == pixels[off2 + 1] && tcg == pixels[off2 + 9] &&
               tcr == pixels[off + 10] && tcr == pixels[off2 + 2] && tcr == pixels[off2 + 10])
            {
                /* all four corners identical: flood-fill the tile interior */
                pixels[off + 4] = pixels[off1] = pixels[off1 + 4] = pixels[off1 + 8] = pixels[off2 + 4] = tcb;
                pixels[off + 5] = pixels[off1 + 1] = pixels[off1 + 5] = pixels[off1 + 9] = pixels[off2 + 5] = tcg;
                pixels[off + 6] = pixels[off1 + 2] = pixels[off1 + 6] = pixels[off1 + 10] = pixels[off2 + 6] = tcr;
            }
            else
            {
                /* corners differ: compute the 4 edge pixels as well */
                x = cx = _mm_setr_ps(((j + prex + 1) * invert - x0), ((j + prex) * invert - x0), ((j + prex + 2) * invert - x0), ((j + prex + 1) * invert - x0));
                y = cy = _mm_setr_ps(((i + prey) * invert - y01), ((i + prey + 1) * invert - y01), ((i + prey + 1) * invert - y01), ((i + prey + 2) * invert - y01));
                k = _mm_setzero_ps();
                l = 0;
                if(m > 1e15 && apnum)
                {
                    zx = _mm_mul_ps(x, x);
                    zy = _mm_mul_ps(y, y);
                    sum = _mm_sub_ps(zx, zy);
                    xy = x * y;
                    tx = Ax * x - Ay * y + Bx * (sum) - 2.0f * By * xy + Cx * x * (zx - 3.0f * zy) + Cy * y * (zy - 3.0f * zx);
                    y = Ax * y + Ay * x + 2.0f * Bx * xy + By * (sum) + Cx * y * (3.0f * zx - zy) + Cy * x * (zx - 3.0f * zy);
                    x = tx;
                    l = apnum;
                    k += (float)apnum;
                }
                do
                {
                    tx  = _mm_set1_ps(dxl[l]);
                    tx1 = _mm_set1_ps(dyl[l]);
                    zx = _mm_mul_ps(x, x);
                    zy = _mm_mul_ps(y, y);
                    sum = _mm_add_ps(zy, zx);
                    xy = 2.0f * (y * (x + tx) + x * tx1);
                    x = 2.0f * (tx * x - tx1 * y) + zx - zy + cx;
                    y = _mm_add_ps(xy, cy);
                    mask = _mm_cmplt_ps(sum, four);
                    k = _mm_add_ps(k, _mm_and_ps(one, mask));
                }
                while(++l < iter && _mm_movemask_ps(mask));
                k = _mm_div_ps(k, iterace);
                k *= 8000.0f;
                bac += pixels[off + 4] = colb(k[0]);
                gac += pixels[off + 5] = colg(k[0]);
                rac += pixels[off + 6] = colr(k[0]);
                bac += pixels[off1]     = colb(k[1]);
                gac += pixels[off1 + 1] = colg(k[1]);
                rac += pixels[off1 + 2] = colr(k[1]);
                bac += pixels[off1 + 8]  = colb(k[2]);
                gac += pixels[off1 + 9]  = colg(k[2]);
                rac += pixels[off1 + 10] = colr(k[2]);
                bac += pixels[off2 + 4] = colb(k[3]);
                gac += pixels[off2 + 5] = colg(k[3]);
                rac += pixels[off2 + 6] = colr(k[3]);
                /* centre pixel = average of the 8 surrounding samples */
                pixels[off1 + 4] = bac >> 3;
                pixels[off1 + 5] = gac >> 3;
                pixels[off1 + 6] = rac >> 3;
            }
        }
    }
}
#endif // COMPUTE_H_INCLUDED
|
GB_binop__iseq_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_uint32)
// A*D function (colscale): GB (_AxD__iseq_uint32)
// D*A function (rowscale): GB (_DxB__iseq_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_uint32)
// C=scalar+B GB (_bind1st__iseq_uint32)
// C=scalar+B' GB (_bind1st_tran__iseq_uint32)
// C=A+scalar GB (_bind2nd__iseq_uint32)
// C=A'+scalar GB (_bind2nd_tran__iseq_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_UINT32 || GxB_NO_ISEQ_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  The loop body comes from the
// shared template, specialized by the GB_* macros above to
// z = (x == y) on uint32_t.
void GB (_Cdense_ewise3_noaccum__iseq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads          // number of OpenMP threads used by the template
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse matrix B into dense matrix C, using the
// slicing of B's entries/vectors prepared by the caller.
GrB_Info GB (_Cdense_accumB__iseq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* control flags
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into every entry of a dense matrix C.
GrB_Info GB (_Cdense_accumb__iseq_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // points to the scalar b, typecast below
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above always returns first;
    // harmless artifact of the code generator
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__iseq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results directly into C->x
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__iseq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results directly into C->x
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, optionally masked/complemented.
// For eWiseUnion, alpha/beta supply the value used where only one of A or B
// has an entry.
GrB_Info GB (_AaddB__iseq_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here is released by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // the scalars are only read in the eWiseUnion case
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B, where C is
// sparse or hypersparse; work is split across the caller-built TaskList.
GrB_Info GB (_AemultB_08__iseq_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for ISEQ (commutative), so only the
// non-flipped branch below is compiled.
GrB_Info GB (_AemultB_02__iseq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The op is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both
// A and B are bitmap/full; iteration is driven by the sliced mask M.
GrB_Info GB (_AemultB_04__iseq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__iseq_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for all bnz entries of B, with the scalar bound
// to the first operand of the binary op.
GrB_Info GB (_bind1st__iseq_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // entry bitmap of B (GBB treats NULL as all-present)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip positions with no entry in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the ISEQ operator with the scalar bound as the second operand:
// Cx [p] = (Ax [p] == y) for every entry p present in A.
GrB_Info GB (_bind2nd__iseq_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // values of A
    const GB_void *y_input,     // the scalar y, bound as the 2nd operand
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,                // number of entries to process
    int nthreads                // # of OpenMP threads to use
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        // ISEQ: 1 if equal, 0 otherwise
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_unop_transpose.c applies GB_CAST_OP to each entry:
// here cij = (x == aij), with x bound as the 1st operand.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x == aij) ; \
}
GrB_Info GB (_bind1st_tran__iseq_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,             // the scalar x, bound first
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // transpose workspaces
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows (compile-time only;
    // these directives have no run-time effect)
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_unop_transpose.c applies GB_CAST_OP to each entry:
// here cij = (aij == y), with y bound as the 2nd operand.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij == y) ; \
}
GrB_Info GB (_bind2nd_tran__iseq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,             // the scalar y, bound second
    int64_t *restrict *Workspaces,      // transpose workspaces
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__rminus_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__rminus_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__rminus_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_int64)
// A*D function (colscale): GB (_AxD__rminus_int64)
// D*A function (rowscale): GB (_DxB__rminus_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_int64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_int64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_int64)
// C=scalar+B GB (_bind1st__rminus_int64)
// C=scalar+B' GB (_bind1st_tran__rminus_int64)
// C=A+scalar GB (_bind2nd__rminus_int64)
// C=A'+scalar GB (_bind2nd_tran__rminus_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_INT64 || GxB_NO_RMINUS_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense, using the rminus operator.
// No error return: this kernel cannot fail (hence the void return type).
void GB (_Cdense_ewise3_accum__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads          // # of OpenMP threads to use
)
{ 
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, using the rminus operator.
GrB_Info GB (_Cdense_ewise3_noaccum__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads          // # of OpenMP threads to use
)
{ 
    #if GB_DISABLE
    // operator/type disabled at compile time; fall back to generic method
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix with rminus.
GrB_Info GB (_Cdense_accumB__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix with the rminus operator.
GrB_Info GB (_Cdense_accumb__rminus_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar b, of type int64_t
    const int nthreads          // # of OpenMP threads to use
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // single exit point; the generated code previously returned inside the
    // block above and again here, leaving this statement unreachable
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by a diagonal matrix D, using rminus as the
// multiply operator.
GrB_Info GB (_AxD__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,  // if true, values of A are ignored
    const GrB_Matrix D, bool D_is_pattern,  // if true, values of D are ignored
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by a diagonal matrix D, using rminus as the
// multiply operator.
GrB_Info GB (_DxB__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,  // if true, values of D are ignored
    const GrB_Matrix B, bool B_is_pattern,  // if true, values of B are ignored
    int nthreads                            // # of OpenMP threads to use
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the rminus operator, in the sparsity
// format selected by C_sparsity.
GrB_Info GB (_AaddB__rminus_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,     // if true, use M structurally
    const bool Mask_comp,       // if true, use the complemented mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,        // if true, C shares its hyperlist with M
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // ek-slicing workspaces used by the template, released via GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult method 01: C = A.*B or C<M> = A.*B with the rminus operator.
GrB_Info GB (_AemultB_01__rminus_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,     // if true, use M structurally
    const bool Mask_comp,       // if true, use the complemented mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hypersparse and B is
// bitmap/full, with the rminus operator.
GrB_Info GB (_AemultB_02__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,     // if true, use M structurally
    const bool Mask_comp,       // if true, use the complemented mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,          // if true, compute z = f(y,x) instead of f(x,y)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    { 
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    { 
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (rminus is minus with its operands pre-swapped, so this branch applies.)
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: C<M> = A.*B where M is sparse/hypersparse and both
// A and B are bitmap/full, with the rminus operator.
GrB_Info GB (_AemultB_03__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,             // if true, use M structurally
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,  // per-task start of entries in C
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B,
// with the rminus operator.
GrB_Info GB (_AemultB_bitmap__rminus_int64)
(
    GrB_Matrix C,
    const int ewise_method,     // selects the bitmap sub-method
    const GrB_Matrix M,
    const bool Mask_struct,     // if true, use M structurally
    const bool Mask_comp,       // if true, use the complemented mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply the rminus operator with the scalar bound as the first operand:
// Cx [p] = (Bx [p] - x) for every entry p present in B.
// (rminus: z = y - x, so binding x first yields cij = bij - x.)
GrB_Info GB (_bind1st__rminus_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // the scalar x, bound as the 1st operand
    const GB_void *Bx_input,    // values of B
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is full
    int64_t bnz,                // number of entries in B (renamed from the
                                // misleading "anz" for consistency with the
                                // other generated _bind1st kernels)
    int nthreads                // # of OpenMP threads to use
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int64_t bij = Bx [p] ;
        Cx [p] = (bij - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the rminus operator with the scalar bound as the second operand:
// Cx [p] = (y - Ax [p]) for every entry p present in A.
// (rminus: z = y - x, so binding y second yields cij = y - aij.)
GrB_Info GB (_bind2nd__rminus_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // values of A
    const GB_void *y_input,     // the scalar y, bound as the 2nd operand
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,                // number of entries in A
    int nthreads                // # of OpenMP threads to use
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int64_t aij = Ax [p] ;
        Cx [p] = (y - aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_unop_transpose.c applies GB_CAST_OP to each entry:
// here cij = (aij - x), the rminus operator with x bound first.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int64_t aij = Ax [pA] ;      \
    Cx [pC] = (aij - x) ;       \
}
GrB_Info GB (_bind1st_tran__rminus_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,             // the scalar x, bound first
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // transpose workspaces
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows (compile-time only;
    // these directives have no run-time effect)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_unop_transpose.c applies GB_CAST_OP to each entry:
// here cij = (y - aij), the rminus operator with y bound second.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int64_t aij = Ax [pA] ;      \
    Cx [pC] = (y - aij) ;       \
}
GrB_Info GB (_bind2nd_tran__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,             // the scalar y, bound second
    int64_t *restrict *Workspaces,      // transpose workspaces
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
graph.h | #pragma once
#include<iostream>
#include<unordered_map>
#include<unordered_set>
#include<string>
#include<vector>
#include<queue>
#include<limits>
#include<fstream>
#include<stack>
#include<sstream>
#include<cmath>
#include<cstring>
#include<exception>
#include<stack>
#include<list>
#include<functional>
#include"util.h"
#include"pQueue.h"
#include"parallelFileOp.h"
#include"parallelEdgeListLoad.h"
#include<omp.h>
using namespace std;
/**
 * Undirected, weighted graph stored as a nested adjacency map:
 * data[a][b] == data[b][a] == weight of edge {a, b}.
 * nodeIdx, edge, pQueue and the edge-list loaders come from the project
 * headers included above.
 */
class Graph{
public:
  // Empty graph.  edgeCount is zero-initialized via its in-class
  // initializer below; it was previously left indeterminate here, making
  // numEdges() undefined behavior on a default-constructed Graph.
  Graph(){};
  // Builds the graph induced by `subset` from the edge list stored at `path`.
  Graph(const string& path, const unordered_set<nodeIdx>& subset){
    list<edge> edgeList;
    fastLoadEdgeList(path, edgeList, subset);
    // move edges into a vector so the OpenMP loop can index them
    vector<edge> edgeVec;
    edgeVec.reserve(edgeList.size());
    for(auto& e : edgeList){
      edgeVec.emplace_back(move(e));
    }
    edgeCount = edgeVec.size();
    #pragma omp parallel
    {
      // each thread builds a private adjacency map, merged one thread at a
      // time inside the critical section to limit contention on `data`
      unordered_map<nodeIdx, unordered_map<nodeIdx, float>> localGraphData;
      #pragma omp for
      for(size_t i = 0; i < edgeVec.size(); ++i){
        const edge& e = edgeVec[i];
        localGraphData[e.a][e.b] = e.weight;
        localGraphData[e.b][e.a] = e.weight;
      }
      #pragma omp critical
      {
        for(auto& pair : localGraphData){
          data[pair.first].insert(
            pair.second.begin(),
            pair.second.end()
          );
        }
      }
    }
  }
  size_t numNodes() const { return data.size();}
  size_t numEdges() const { return edgeCount;}
  // Adds every edge in `edges` (each stored in both directions).
  void addEdges(const list<edge>& edges){
    for(const edge& e : edges){
      addEdge(e.a, e.b, e.weight);
    }
  }
  // Adds the undirected edge {a, b}; silently ignored if either endpoint is
  // UNDEFINED.
  // NOTE(review): re-adding an existing edge overwrites the weight but still
  // increments edgeCount — confirm whether duplicate inserts are expected.
  void addEdge(nodeIdx a, nodeIdx b, float weight){
    //if(weight < 0)
      //throw runtime_error("Invalid edge");
    if(a != UNDEFINED && b != UNDEFINED){
      data[a][b] = weight;
      data[b][a] = weight;
      ++edgeCount;
    }
  }
  // Best-first expansion from `source`: returns up to maxResult nodes that
  // satisfy `filter`, visiting nodes in order of accumulated path weight.
  // Precondition: source must be a node of the graph (data.at throws
  // otherwise).
  unordered_set<nodeIdx> getSelectNearby(nodeIdx source,
                                         size_t maxResult,
                                         function<bool(nodeIdx)> filter) const{
    typedef pair<nodeIdx, float> halfEdge;
    pQueue<nodeIdx, float> pq;
    unordered_set<nodeIdx> visited, result(maxResult);
    pq.push(source, 0);
    visited.insert(source);
    if(filter(source)) result.insert(source);
    while(!pq.empty() && result.size() < maxResult){
      halfEdge cEdge = pq.pop();
      visited.insert(cEdge.first);
      if(filter(cEdge.first)) result.insert(cEdge.first);
      for(const halfEdge& nEdge : data.at(cEdge.first)){
        if(visited.find(nEdge.first) == visited.end())
          pq.push(nEdge.first, cEdge.second + nEdge.second);
      }
    }
    return result;
  }
  // (removed: a large commented-out draft of a union-find `connected`
  // implementation lived here — dead code)
  // Returns up to n nodes closest to `source` by accumulated edge weight,
  // excluding source itself.
  unordered_set<nodeIdx> getNeighbors(nodeIdx source, size_t n) const{
    unordered_set<nodeIdx> visited;
    pQueue<nodeIdx, float> pq;
    pq.push(source, 0);
    // n+1 so that we don't return source
    while(!pq.empty() && visited.size() < n+1){
      halfEdge cEdge = pq.pop();
      visited.insert(cEdge.first);
      for(const halfEdge & nEdge : data.at(cEdge.first)){
        if(visited.find(nEdge.first) == visited.end())
          pq.push(nEdge.first, cEdge.second + nEdge.second);
      }
    }
    visited.erase(source);
    return visited;
  }
  // Dijkstra-style shortest path: returns the node sequence from source to
  // target, or an empty vector when target is unreachable.
  vector<nodeIdx> getShortestPath(nodeIdx source, nodeIdx target) const{
    vector<nodeIdx> res;
    // Phase 1: settle nodes (tighten edges) until target's distance is final.
    pQueue<nodeIdx, float> pq;
    unordered_map<nodeIdx, float> finalDists;
    pq.push(source, 0);
    while(!pq.empty() && finalDists.find(target) == finalDists.end()){
      halfEdge cEdge = pq.pop();
      finalDists[cEdge.first] = cEdge.second; // record the tight edge
      for(const halfEdge & nEdge : data.at(cEdge.first)){
        if(finalDists.find(nEdge.first) == finalDists.end()){
          pq.push(nEdge.first, nEdge.second + cEdge.second);
        }
      }
    }
    // Phase 2: if we found the goal, DFS from source along tight edges only.
    if(finalDists.find(target) != finalDists.end()){
      stack<nodeIdx> stk;
      stk.push(source);
      unordered_map<nodeIdx, nodeIdx> backPointers;
      nodeIdx cNode;
      while((cNode = stk.top()) != target){
        bool foundTightEdge = false; // we have to back track if we get stuck
        for(const halfEdge & nEdge : data.at(cNode)){
          const nodeIdx & nNode = nEdge.first;
          // no self loop
          if(cNode != nNode
             // was visited
             && finalDists.find(nEdge.first) != finalDists.end()
             // is tight edge
             && abs(finalDists[cNode] + data.at(cNode).at(nNode) - finalDists[nNode]) < EPS
             // not a backpointer already
             && backPointers.find(nNode) == backPointers.end()){
            stk.push(nNode);
            backPointers[nNode] = cNode;
            foundTightEdge = true;
          }
        }
        if(!foundTightEdge){
          stk.pop(); // backtrack
        }
      }
      // rebuild the path target -> source, then emit it reversed
      stack<nodeIdx> backTraversal;
      backTraversal.push(target);
      while(backTraversal.top() != source){
        backTraversal.push(backPointers[backTraversal.top()]);
      }
      while(!backTraversal.empty()){
        res.push_back(backTraversal.top());
        backTraversal.pop();
      }
    }
    return res;
  }
  // Exports the graph as an undirected edge list; each edge is emitted once
  // (for the endpoint pair with a < b).
  list<edge> toEdgeList() const{
    list<edge> res;
    for(const auto & node2Negh : data){
      const nodeIdx& i = node2Negh.first;
      for(const auto & pair : node2Negh.second){
        if(pair.first > i){
          edge e;
          e.a = i; e.b = pair.first; e.weight = pair.second;
          res.push_back(e);
        }
      }
    }
    return res;
  }
  friend ostream& operator<<(ostream& out, const Graph& g);
  // Returns the set of all node ids present in the graph.
  unordered_set<nodeIdx> getNodes() const {
    unordered_set<nodeIdx> res;
    res.reserve(data.size());
    for(const auto & p : data){
      res.insert(p.first);
    }
    return res;
  }
private:
  typedef pair<nodeIdx, float> halfEdge; // half edge only has one node
  unordered_map<nodeIdx, unordered_map<nodeIdx, float>> data;
  size_t edgeCount = 0; // FIX: zero-initialize (was indeterminate after Graph())
};
// Streams the graph as one "node neighbor weight" line per stored
// (directed) half-edge; undirected edges therefore appear twice.
ostream& operator<<(ostream& out, const Graph& g){
  for(const auto & adjacency : g.data){
    const nodeIdx src = adjacency.first;
    for(const auto & nbr : adjacency.second){
      out << src << " " << nbr.first << " " << nbr.second << endl;
    }
  }
  return out;
}
|
GB_unaryop__identity_uint8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint8_int8
// op(A') function: GB_tran__identity_uint8_int8
// C type: uint8_t
// A type: int8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint8_t) Ax [p] for all anz entries: the identity operator with
// an int8_t -> uint8_t typecast, expanded from the GB_* macros above.
GrB_Info GB_unop__identity_uint8_int8
(
    uint8_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,                // number of entries
    int nthreads                // # of OpenMP threads to use
)
{ 
    #if GB_DISABLE
    // operator/type disabled at compile time; fall back to generic method
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (uint8_t) Ax [p]
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose A, casting int8_t values to uint8_t,
// via the shared transpose template.
GrB_Info GB_tran__identity_uint8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,       // per-slice row counts
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                         // # of slices of A
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // run only the numerical phase; the symbolic phase was done by the caller
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
threading.h | #ifndef LIGHTGBM_UTILS_THREADING_H_
#define LIGHTGBM_UTILS_THREADING_H_
#include <LightGBM/utils/openmp_wrapper.h>
#include <vector>
#include <functional>
namespace LightGBM {
// Static helper that splits the half-open range [start, end) into one
// contiguous chunk per OpenMP thread and invokes inner_fun(thread_id,
// chunk_begin, chunk_end) for each non-empty chunk in parallel.
class Threading {
 public:
  template<typename INDEX_T>
  static inline void For(INDEX_T start, INDEX_T end, const std::function<void(int, INDEX_T, INDEX_T)>& inner_fun) {
    // Ask OpenMP how many threads a parallel region actually gets.
    int thread_cnt = 1;
    #pragma omp parallel
    #pragma omp master
    {
      thread_cnt = omp_get_num_threads();
    }
    // Chunk size, rounded up so the whole range is covered; at least 1.
    INDEX_T chunk_size = (end - start + thread_cnt - 1) / thread_cnt;
    if (chunk_size <= 0) { chunk_size = 1; }
    #pragma omp parallel for schedule(static,1)
    for (int tid = 0; tid < thread_cnt; ++tid) {
      INDEX_T chunk_begin = start + chunk_size * tid;
      INDEX_T chunk_end = chunk_begin + chunk_size;
      if (chunk_end > end) { chunk_end = end; }
      // Trailing threads may receive an empty chunk; skip the call then.
      if (chunk_begin < end) {
        inner_fun(tid, chunk_begin, chunk_end);
      }
    }
  }
};
} // namespace LightGBM
#endif // LightGBM_UTILS_THREADING_H_
|
perSentenceStats.h | /**
* @author Samuel Larkin
* @file eval/perSentenceStats.h
* @brief A scoring metric wrap to perform on sentence level scoring.
*
*
* Technologies langagieres interactives / Interactive Language Technologies
* Inst. de technologie de l'information / Institute for Information Technology
* Conseil national de recherches Canada / National Research Council Canada
* Copyright 2011, Sa Majeste la Reine du Chef du Canada /
* Copyright 2011, Her Majesty in Right of Canada
*/
#ifndef __PER_SENTENCE_STATS_H__
#define __PER_SENTENCE_STATS_H__
#include "basic_data_structure.h"
namespace Portage {
template <class ScoreMetric>
class perSentenceStats {
   public:
      // Sum of the per-sentence display-scale scores accumulated so far.
      double total;
      // Number of sentences accumulated into total.
      Uint count;
   public:
      perSentenceStats()
      : total(0.0f)
      , count(0)
      {}
      perSentenceStats(const Translation& trans, const References& refs)
      : total(0.0f)
      , count(0)
      {
         init(trans, refs);
      }
      // Scores one translation against its references and resets this object
      // to hold exactly that one sentence's display-scale score.
      void init(const Translation& trans, const References& refs) {
         total = ScoreMetric::convertToDisplay(ScoreMetric(trans, refs).score());
         count = 1;
      }
      /// What is this metric's name.
      /// @return Returns "perSentenceStats<X>" where X is the wrapped metric's name.
      static const char* const name() {
         static const string name(string("perSentenceStats<") + ScoreMetric::name() + ">");
         return name.c_str();
      }
      /**
       * Identity conversion: per-sentence scores are already on display scale.
       * @param value internal value (eg, from score())
       * @return display value.
       */
      static double convertToDisplay(double value) {
         return value;
      }
      /**
       * Identity conversion: per-sentence scores are already on display scale.
       * @param value display value
       * @return internal value.
       */
      static double convertFromDisplay(double value) {
         return value;
      }
      /**
       * Convert "internal" score value to pnorm format: in [0,1],
       * higher scores are better.  Delegates to convertToDisplay(), which is
       * the identity here.
       * @param value internal value (eg, from score())
       * @return pnorm value
       */
      static double convertToPnorm(double value) {
         return convertToDisplay(value);
      }
      /**
       * Convert "internal" score value from pnorm format: in [0,1],
       * higher scores are better.  Delegates to convertFromDisplay(), which
       * is the identity here.
       * @param value pnorm value
       * @return internal value
       */
      static double convertFromPnorm(double value) {
         return convertFromDisplay(value);
      }
      // Mean per-sentence score; 0 when no sentences have been accumulated.
      double score() const { return (count > 0 ? total / count : 0.0f); }
      /**
       * Prints the accumulated total, count and mean score in a human
       * readable format to out.
       * @param out output stream defaults to cout.
       */
      void output(ostream &out = cout) const {
         out << "total: " << total << endl;
         out << "count: " << count << endl;
         out << "Score: " << score() << endl;
      }
      /**
       * Prints total and count so that the object can be reread.
       * @param out output stream mainly a file.
       */
      void write(ostream &out) const {
         out << total << "\t" << count << endl;
      }
      /**
       * Finds the difference in statistics between two perSentenceStats
       * objects.
       * @param other right-hand side operand
       * @return Returns *this after subtracting other's total and count
       */
      perSentenceStats<ScoreMetric>& operator-=(const perSentenceStats<ScoreMetric>& other) {
         total -= other.total;
         count -= other.count;
         return *this;
      }
      /**
       * Adds together the statistics of two perSentenceStats objects.
       * @param other right-hand side operand
       * @return Returns *this after adding other's total and count
       */
      perSentenceStats<ScoreMetric>& operator+=(const perSentenceStats<ScoreMetric>& other) {
         total += other.total;
         count += other.count;
         return *this;
      }
      /// Callable entity for booststrap confidence interval.
      struct CIcomputer
      {
         /// Define what is an iterator for a CIcomputer.
         typedef typename vector<perSentenceStats<ScoreMetric> >::const_iterator iterator;
         /**
          * Cumulates all per-sentence stats in the range and returns the
          * mean score.
          * NOTE(review): uses std::accumulate — presumably <numeric> is
          * pulled in via basic_data_structure.h; confirm.
          * @param begin start iterator
          * @param end end iterator
          * @return Returns the mean score once the stats are all cumulated.
          */
         double operator()(iterator begin, iterator end) {
            perSentenceStats<ScoreMetric> total;
            return std::accumulate(begin, end, total).score();
         }
      };
};
/// Component-wise difference: returns a stats object holding s1 - s2.
template <class ScoreMetric>
perSentenceStats<ScoreMetric> operator-(const perSentenceStats<ScoreMetric>& s1, const perSentenceStats<ScoreMetric>& s2) {
   perSentenceStats<ScoreMetric> diff(s1);
   return diff -= s2;
}
/// Component-wise sum: returns a stats object holding s1 + s2.
template <class ScoreMetric>
perSentenceStats<ScoreMetric> operator+(const perSentenceStats<ScoreMetric>& s1, const perSentenceStats<ScoreMetric>& s2) {
   perSentenceStats<ScoreMetric> sum(s1);
   return sum += s2;
}
/// Two stats objects are equal iff both total and count match exactly.
template <class ScoreMetric>
bool operator==(const perSentenceStats<ScoreMetric>& s1, const perSentenceStats<ScoreMetric>& s2) {
   if (s1.total != s2.total) return false;
   return s1.count == s2.count;
}
/// Negation of operator==.
template <class ScoreMetric>
bool operator!=(const perSentenceStats<ScoreMetric>& s1, const perSentenceStats<ScoreMetric>& s2) {
   return !(s1 == s2);
}
/**
* Scale ScoreMetric by a constant.
*/
// NOTE(review): this operator mutates its left operand (non-const reference)
// and returns a copy of it — callers see `s` changed in place.  Confirm this
// is intentional before passing temporaries or shared objects.
template <class ScoreMetric>
perSentenceStats<ScoreMetric> operator*(perSentenceStats<ScoreMetric> &s, double c) {
   s.total *= c;
   return s;
}
// Scores the first min(max, nbest.size()) hypotheses of an n-best list
// against the references, one perSentenceStats per hypothesis, in parallel.
// Hypotheses and references are tokenized into a shared vocabulary first so
// the scoring works on integer ids.
template <class ScoreMetric>
void computeArrayRow(vector<perSentenceStats<ScoreMetric> >& scores,
                     const Nbest& nbest,
                     const References& refs,
                     Uint max)
{
   const Uint K = min(max, nbest.size());
   scores.resize(K);
   Voc voc;
   vector<vector<Uint> > nbest_uint;
   tokenize(nbest, voc, nbest_uint);
   vector<vector<Uint> > refs_uint;
   tokenize(refs, voc, refs_uint);
   // signed loop index for OpenMP compatibility
   int k;
   #pragma omp parallel for private(k)
   for (k=0; k<(int)K; ++k) {
      scores[k].init(nbest_uint[k], refs_uint);
   }
}
} // ends namespace Portage
#endif // __PER_SENTENCE_STATS_H__
|
dgelqs.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgelqs.c, normal z -> d, Fri Sep 28 17:38:05 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gelqs
*
* Computes a minimum-norm solution min | A*X - B | using the
* LQ factorization A = L*Q computed by plasma_dgelqf.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= m >= 0.
*
* @param[in] nrhs
* The number of columns of B. nrhs >= 0.
*
* @param[in] pA
* Details of the LQ factorization of the original matrix A as returned
* by plasma_dgelqf.
*
* @param[in] lda
* The leading dimension of the array A. lda >= m.
*
* @param[in] T
* Auxiliary factorization data, computed by plasma_dgelqf.
*
* @param[in,out] pB
* On entry, pointer to the m-by-nrhs right hand side matrix B.
* On exit, the n-by-nrhs solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= n.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_dgelqs
* @sa plasma_cgelqs
* @sa plasma_dgelqs
* @sa plasma_sgelqs
* @sa plasma_dgelqf
*
******************************************************************************/
int plasma_dgelqs(int m, int n, int nrhs,
                  double *pA, int lda,
                  plasma_desc_t T,
                  double *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0 || m > n) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -3;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -5;
    }
    // B must hold the n-by-nrhs solution X on exit, so ldb >= max(1, n).
    // (Fixed: the redundant nested imax(1, imax(1, n)) was simplified.)
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -8;
    }

    // quick return
    if (m == 0 || n == 0 || nrhs == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gelqf(plasma, PlasmaRealDouble, m, n);

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = ib*nb;  // unmlq: work
    retval = plasma_workspace_create(&work, lwork, PlasmaRealDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // Fixed: this error path previously leaked the tile descriptors.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_dgelqs(A, T, B, work, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_gelqs
*
* Computes a minimum-norm solution using previously computed LQ factorization.
* Non-blocking tile version of plasma_dgelqs().
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] A
* Descriptor of matrix A.
* A is stored in the tile layout.
*
* @param[in] T
* Descriptor of matrix T.
* Auxiliary factorization data, computed by plasma_dgelqf.
*
* @param[in,out] B
* Descriptor of matrix B.
* On entry, right-hand side matrix B in the tile layout.
* On exit, solution matrix X in the tile layout.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For multiplication by Q contains preallocated space for work
* arrays. Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dgelqs
* @sa plasma_omp_cgelqs
* @sa plasma_omp_dgelqs
* @sa plasma_omp_sgelqs
* @sa plasma_omp_dgelqf
*
******************************************************************************/
void plasma_omp_dgelqs(plasma_desc_t A, plasma_desc_t T,
                       plasma_desc_t B, plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Validate sequence and request before anything else: every failure
    // below is reported through plasma_request_fail(sequence, request, ...),
    // which needs usable pointers. (Fixed ordering: the NULL checks used to
    // come after the descriptor checks, so a bad descriptor combined with a
    // NULL sequence would hand NULL to plasma_request_fail.)
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid descriptor T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid descriptor B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0 || A.n == 0 || B.n == 0)
        return;

    // Zero the trailing block of the right-hand-side matrix.
    // B has less rows than X.
    plasma_pdlaset(PlasmaGeneral, 0.0, 0.0,
                   plasma_desc_view(B, A.m, 0, A.n - A.m, B.n),
                   sequence, request);

    // Solve L * Y = B.
    plasma_pdtrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaNonUnit,
                  1.0, plasma_desc_view(A, 0, 0, A.m, A.m),
                       plasma_desc_view(B, 0, 0, A.m, B.n),
                  sequence, request);

    // Find X = Q^T * Y.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pdormlq_tree(PlasmaLeft, PlasmaTrans,
                            A, T, B, work,
                            sequence, request);
    }
    else {
        plasma_pdormlq(PlasmaLeft, PlasmaTrans,
                       A, T, B, work,
                       sequence, request);
    }
}
|
blt.c | /*
BACON:
Implements the semiparametric autoregressive model for
radiocarbon chronologies, using the twalk. See paper for mathematical details and
the files:
- bacon.h: This is the implementation, with the model definitions etc.
- cal.h: Reads and manages calibration curves and determinations
- input.h, input.c: reads the input files and stores all data
- ranfun.h, twalk.h, Matrix.h: for some gsl interfaces for random number generation, the C++ twalk implementation and a simple Matrix class.
*/
#include <stdio.h>
#include <math.h>
#include <unistd.h>
#include <string.h>
#include "bacon.h"
#include "input.h"
#include "ranfun.h"
#include "blt.h"
int main(int argc, char *argv[]) {

    // Command line: blt inputfile outputfile ssize
    if (argc < 4) {
        printf("Usage: blt inputfile outputfile ssize\n");
        exit(0);
    }

    printf("blt: THIS IS THE BLT\n\n");

    blt All(argv[1]);

    // Sample size for y ... each core is run for an effective sample size of 10*ssize
    // but is subsampled 10 times less often, to obtain a similar sample size.
    int ssize;
    // Fixed: the sscanf result was previously unchecked, so a non-numeric
    // argument left ssize uninitialized.
    if (sscanf( argv[3], " %d", &ssize) != 1) {
        printf("blt: could not parse sample size '%s'\n", argv[3]);
        exit(-1);
    }

    //ssize is the final sample size needed
    //ssize = it/(ACCEP_EV * All.Dim() * EVERY_MULT) - BURN_IN_MULT
    //Then we let
    // int it = (ACCEP_EV * All[0]->Dim() * EVERY_MULT * (ssize + BURN_IN_MULT));

    //run in parallel using the -fopenmp compiler option
    // Fixed: 'it' and 'every' were declared outside the parallel loops and
    // therefore shared across threads (a data race). They are now loop-local.
    #pragma omp parallel for
    for (int c=0; c<All.GetT(); c++) {
        //Run the twalk
        int it = (ACCEP_EV * All.Dim(c) * EVERY_MULT * (0 + BURN_IN_MULT));
        int every = -1*EVERY_MULT*All.Dim(c);// only accepted iterations are saved
        printf("blt: %d iterations in core %s\n", abs(it), All.GetCoreName(c));
        All.RunTwalk( c, it, every, "w+", 0);
    }

    //Output file for the blt
    FILE *outy;
    if ((outy = fopen( argv[2], "w+")) == NULL)
    {
        // Fixed: the message previously printed the hard-coded name
        // "ysamples.out" instead of the file that actually failed to open.
        printf("Could not open %s for writing\n", argv[2]);
        exit(-1);
    }

    //Header for the blt output file
    for (int j=0; j<All.Getn(); j++)
        fprintf( outy, "%s ", All.GetMarkerName(j));
    fprintf( outy, "\n");

    printf("blt: Sampling from cores and y\n\n");
    int N=ssize;
    double mean, tau, y;
    for (int k=0; k<N; k++) {

        #pragma omp parallel for
        for (int c=0; c<All.GetT(); c++) {
            int it = (ACCEP_EV * All.Dim(c) * EVERY_MULT * ( 10 ));
            int every= -1*EVERY_MULT * 10 *All.Dim(c);// only accepted iterations are saved
            if ((k % 10) == 0)
                printf("blt: %d iterations in core %s\n", abs(it), All.GetCoreName(c));
            //Run the twalk, if no initial points are given,
            //then the previous last points are used.
            All.RunTwalk( c, it, every, "a", 1); //append the new samples
        }

        //Calculate the mean for each marker and update y
        for (int j=0; j<All.Getn(); j++) {
            mean = All.GetMeanTh(j);
            //Update the y
            tau = All.GetTau0(j) + All.GetSz(j)*All.GetTau1(j);
            y = NorSim( (All.GetTau0(j)*All.GetMu0(j) + All.GetSz(j)*All.GetTau1(j)*mean)/tau , sqrt(1.0/tau) );
            //set it in all cores
            for (int c=0; c<All.GetT(); c++) {
                if (All.ExistsMarker( j, c)) {
                    All.SetY( j, y);
                }
            }
            //And save it
            fprintf( outy, "%7.1f ", y);
        }
        // Fixed: fprintf was passed a spurious extra argument (y) with no
        // matching conversion specifier.
        fprintf( outy, "\n");

        if ((k % 10) == 0)
            printf("blt: %d iterations of %d done so far.\n\n", k+1 , N);
    }
    fclose(outy);
    printf("blt: y samples in %s\n", argv[2]);

    All.PrintNumWarnings();

    for (int c=0; c<All.GetT(); c++) {
        printf("blt: suggested burn in core %d = %d\n", c, All.Dim(c) * EVERY_MULT * BURN_IN_MULT);
    }

    printf(FAREWELL);

    // Fixed: a successful run previously returned 1, which shells interpret
    // as failure.
    return 0;
}
|
binStruct.h | #ifndef binStruct_h
#define binStruct_h
#include "../../baseFunctions/fpBaseNode.h"
#include "../../baseFunctions/MWC.h"
#include "obsIndexAndClassVec.h"
#include "zipClassAndValue.h"
#include "processingNodeBin.h"
#include <vector>
#include <assert.h>
namespace fp{
// binStruct: builds and stores a "bin" of decision trees in one flat vector
// of nodes. Layout of `bin` (see makeLeafNodes/initializeStructures and
// returnRootLocation): the first returnNumClasses() entries are shared class
// leaf nodes, followed by one root node per tree at index
// currTree + numClasses; internal nodes are appended after the roots as
// trees are grown. T is the feature-value type; Q is the feature-index type
// of a split (int, vector<int>, or weightedFeature — see the
// predictBinObservation overloads). Training data is pulled from the global
// fpSingleton.
template <typename T, typename Q>
class binStruct
{
protected:
float OOBAccuracy;
float correctOOB;
float totalOOB;
// Flat storage for all nodes of all trees in this bin.
std::vector< fpBaseNode<T,Q> > bin;
// Work stack of nodes still to be processed (back = current node).
std::vector<processingNodeBin<T,Q> > nodeQueue;
int numberOfNodes;
int numOfTreesInBin;
int currTree;
// Per-class observation-index buckets reused across trees.
obsIndexAndClassVec indicesHolder;
std::vector<zipClassAndValue<int, T> > zipper;
// Scratch permutation of observation indices used for bootstrap sampling.
std::vector<int> nodeIndices;
randomNumberRerFMWC randNum;
//obsIndexAndClassVec indexHolder(numClasses);
//std::vector<zipClassAndValue<int, float> > zipVec(testSize);
// Tags used when wiring children to parents (left == true).
inline bool rightNode(){
return false;
}
inline bool leftNode(){
return true;
}
public:
binStruct() : OOBAccuracy(-1.0),correctOOB(0),totalOOB(0),numberOfNodes(0),numOfTreesInBin(0),currTree(0), indicesHolder(fpSingleton::getSingleton().returnNumClasses()){ }
// Processes the root of the current tree: either turns it into a leaf or
// stores the root split and pushes its two children on the queue.
inline void loadFirstNode(){
//inline void loadFirstNode(obsIndexAndClassVec& indicesHolder, std::vector<zipClassAndValue<int, T> >& zipper){
nodeQueue.emplace_back(0,0,0,randNum);
nodeQueue.back().setupRoot(indicesHolder, zipper);
nodeQueue.back().processNode();
if(nodeQueue.back().isLeafNode()){
makeRootALeaf();
}else{
copyProcessedRootToBin();
createRootChildNodes();
}
}
inline void makeRootALeaf(){
bin[returnRootLocation()].setClass(nodeQueue.back().returnNodeClass());
bin[returnRootLocation()].setDepth(0);
}
// Bootstrap-samples the training set (with replacement) into the per-class
// index buckets; nodeIndices is partially shuffled so the unused
// observations end up in its tail.
inline void setSharedVectors(obsIndexAndClassVec& indicesInNode){
indicesInNode.resetVectors();
int numUnusedObs = fpSingleton::getSingleton().returnNumObservations();
int randomObsID;
int tempMoveObs;
for(int n = 0; n < fpSingleton::getSingleton().returnNumObservations(); n++){
randomObsID = randNum.gen(fpSingleton::getSingleton().returnNumObservations());
indicesInNode.insertIndex(nodeIndices[randomObsID], fpSingleton::getSingleton().returnLabel(nodeIndices[randomObsID]));
if(randomObsID < numUnusedObs){
--numUnusedObs;
tempMoveObs = nodeIndices[numUnusedObs];
nodeIndices[numUnusedObs] = nodeIndices[randomObsID];
nodeIndices[randomObsID] = tempMoveObs;
}
}
}
inline bool shouldProcessNode(){
return !nodeQueue.back().isLeafNode();
}
// Index the next appended node will occupy (bin grows by push_back).
inline int positionOfNextNode(){
return (int)bin.size()-1;
}
inline int parentNodesPosition(){
return (int)bin.size()-1;
}
// Seed the first numClasses slots of bin with shared class-leaf nodes.
inline void makeLeafNodes(){
for(int i= 0; i < fpSingleton::getSingleton().returnNumClasses(); ++i){
bin[i].setSharedClass(i);
}
}
// Depth of the node on top of the queue = parent's depth + 1.
inline int returnDepthOfNode(){
assert(!nodeQueue.empty());
return bin[nodeQueue.back().returnParentNodeNumber()].returnDepth()+1;
}
inline void copyProcessedNodeToBin(){
bin.emplace_back(nodeQueue.back().returnNodeCutValue(), returnDepthOfNode(), nodeQueue.back().returnNodeCutFeature());
}
// The root slot is pre-allocated, so its split is written in place rather
// than appended.
inline void copyProcessedRootToBin(){
bin[returnRootLocation()].setCutValue(nodeQueue.back().returnNodeCutValue());
bin[returnRootLocation()].setDepth(0);
bin[returnRootLocation()].setFeatureValue(nodeQueue.back().returnNodeCutFeature());
}
// Root of the tree currently being built: trees sit right after the
// shared class leaves.
inline int returnRootLocation(){
return currTree+fpSingleton::getSingleton().returnNumClasses();
}
inline void linkParentToChild(){
if(nodeQueue.back().returnIsLeftNode()){
bin[nodeQueue.back().returnParentNodeNumber()].setLeftValue(positionOfNextNode());
}else{
bin[nodeQueue.back().returnParentNodeNumber()].setRightValue(positionOfNextNode());
}
}
// A leaf is encoded by pointing the parent's child link at the shared
// class-leaf slot (an index < numClasses) instead of a real node.
inline void linkParentToLeaf(){
assert(nodeQueue.back().returnParentNodeNumber() >= fpSingleton::getSingleton().returnNumClasses());
assert(nodeQueue.back().returnParentNodeNumber() <= parentNodesPosition());
assert(nodeQueue.back().returnNodeClass() >= 0);
assert(nodeQueue.back().returnNodeClass() < fpSingleton::getSingleton().returnNumClasses());
if(nodeQueue.back().returnIsLeftNode()){
bin[nodeQueue.back().returnParentNodeNumber()].setLeftValue(nodeQueue.back().returnNodeClass());
}else{
bin[nodeQueue.back().returnParentNodeNumber()].setRightValue(nodeQueue.back().returnNodeClass());
}
}
// Pops the processed parent and pushes its two children; the larger child
// is pushed first so the smaller one is processed first (depth-first on
// the smaller side keeps the queue shallow).
inline void createChildNodes(){
nodeIterators nodeIts(nodeQueue.back().returnNodeIterators());
zipperIterators<int,T> zipIts(nodeQueue.back().returnZipIterators());
int childDepth = returnDepthOfNode()+1;
if(nodeQueue.back().isLeftChildLarger()){
nodeQueue.pop_back();
//TODO: don't emplace_back if should be leaf node.
nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum);
nodeQueue.back().setupNode(nodeIts, zipIts, rightNode());
nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum);
nodeQueue.back().setupNode(nodeIts, zipIts, leftNode());
}else{
nodeQueue.pop_back();
nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum);
nodeQueue.back().setupNode(nodeIts, zipIts, leftNode());
nodeQueue.emplace_back(1,parentNodesPosition(), childDepth, randNum);
nodeQueue.back().setupNode(nodeIts, zipIts, rightNode());
}
}
// Same as createChildNodes() but the parent is the pre-allocated root slot.
inline void createRootChildNodes(){
nodeIterators nodeIts(nodeQueue.back().returnNodeIterators());
zipperIterators<int,T> zipIts(nodeQueue.back().returnZipIterators());
int childDepth = returnDepthOfNode()+1;
if(nodeQueue.back().isLeftChildLarger()){
nodeQueue.pop_back();
//TODO: don't emplace_back if should be leaf node.
nodeQueue.emplace_back(1,returnRootLocation(), childDepth,randNum);
nodeQueue.back().setupNode(nodeIts, zipIts, rightNode());
nodeQueue.emplace_back(1,returnRootLocation(), childDepth, randNum);
nodeQueue.back().setupNode(nodeIts, zipIts, leftNode());
}else{
nodeQueue.pop_back();
nodeQueue.emplace_back(1,returnRootLocation(), childDepth,randNum);
nodeQueue.back().setupNode(nodeIts, zipIts, leftNode());
nodeQueue.emplace_back(1,returnRootLocation(), childDepth,randNum);
nodeQueue.back().setupNode(nodeIts, zipIts, rightNode());
}
}
inline void processLeafNode(){
assert(nodeQueue.back().returnNodeSize() > 0);
assert(nodeQueue.back().returnNodeSize() <= fpSingleton::getSingleton().returnNumObservations());
linkParentToLeaf();
nodeQueue.pop_back();
}
inline int returnNumTrees(){
return numOfTreesInBin;
}
inline void processInternalNode(){
copyProcessedNodeToBin();
linkParentToChild();
createChildNodes();
}
// One step of tree growth: split the top-of-queue node, then record it as
// either a leaf or an internal node.
inline void processNode(){
// process the node, i.e. calculate best split, ...
nodeQueue.back().processNode();
if (nodeQueue.back().isLeafNode()) {
// label the processed node as a leaf.
processLeafNode();
}
else {
// label the processed node as internal.
processInternalNode();
}
}
// Entry point for training: grows numTrees trees from bootstrap samples,
// then releases the scratch structures (bin itself is kept).
inline void createBin(int numTrees, int randSeed){
numOfTreesInBin = numTrees;
randNum.initialize(randSeed);
initializeStructures();
for(; currTree < numOfTreesInBin; ++currTree){
setSharedVectors(indicesHolder);
loadFirstNode();
while(!nodeQueue.empty()){
processNode();
}
}
removeStructures();
}
inline void initializeStructures(){
zipper.resize(fpSingleton::getSingleton().returnNumObservations());
nodeIndices.resize(fpSingleton::getSingleton().returnNumObservations());
for(int i = 0; i < fpSingleton::getSingleton().returnNumObservations(); ++i){
nodeIndices[i] =i;
}
// One slot per shared class leaf plus one root slot per tree.
bin.resize(numOfTreesInBin+fpSingleton::getSingleton().returnNumClasses());
makeLeafNodes();
}
// Frees scratch memory via swap-with-empty (clear() would keep capacity).
inline void removeStructures(){
std::vector<processingNodeBin<T,Q> >().swap( nodeQueue );
//indicesHolder.removeObsIndexAndClassVec();
std::vector<zipClassAndValue<int, T> >().swap( zipper );
std::vector<int>().swap( nodeIndices);
}
inline int returnMaxDepth(){
int maxDepth=0;
for(auto& node : bin){
// +1 accounts for the leaf nodes which are never created (optimization that cuts memory required for a forest in half)
if(maxDepth < node.returnDepth()+1){
maxDepth = node.returnDepth()+1;
}
}
return maxDepth;
}
// NOTE(review): leaf count is derived from the stored-node count rather
// than tracked directly — verify the formula against the bin layout.
inline int returnNumLeafNodes(){
return (int)bin.size() - fpSingleton::getSingleton().returnNumClasses() + numOfTreesInBin;
}
// Sum of depths of all (implicit) leaves: a child link pointing below
// numClasses is a leaf, at the parent's depth + 1.
inline int returnLeafDepthSum(){
int leafDepthSums=0;
for(auto& node : bin){
if(node.isInternalNodeFront()){
if(node.returnLeftNodeID() < fpSingleton::getSingleton().returnNumClasses()){
leafDepthSums += node.returnDepth()+1;
}
if(node.returnRightNodeID() < fpSingleton::getSingleton().returnNumClasses()){
leafDepthSums += node.returnDepth()+1;
}
}
}
return leafDepthSums;
}
/////////////////////////
// This is required to template the predictBinObservation function
// //////////////////////////////
template<typename U>
struct identity { typedef U type; };
// Dispatch helpers: select the prediction overload matching split type Q.
inline void predictBinObservation(int observationNum, std::vector<int>& preds){
predictBinObservation(observationNum,preds, identity<Q>());
}
inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds){
predictBinObservation(observation,preds,identity<Q>());
}
////////////////////////////////
//PredictForRF
// Single-feature splits (Q == int), reading the test set from fpSingleton.
// All trees are walked in lock-step; prefetching the next node of each tree
// hides memory latency. A tree is done when its cursor lands on a shared
// class leaf (index < numClasses).
inline void predictBinObservation(int observationNum,std::vector<int>& preds, identity<int> ){
std::vector<int> currNode(numOfTreesInBin);
int numberNotInLeaf;
int featureNum;
T featureVal;
int q;
for( q=0; q<numOfTreesInBin; ++q){
currNode[q] = q+fpSingleton::getSingleton().returnNumClasses();
__builtin_prefetch(&bin[currNode[q]], 0, 3);
}
do{
numberNotInLeaf = 0;
for( q=0; q<numOfTreesInBin; ++q){
if(bin[currNode[q]].isInternalNodeFront()){
featureNum = bin[currNode[q]].returnFeatureNumber();
featureVal = fpSingleton::getSingleton().returnTestFeatureVal(featureNum,observationNum);
currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal);
__builtin_prefetch(&bin[currNode[q]], 0, 3);
++numberNotInLeaf;
}
}
}while(numberNotInLeaf);
// Votes are accumulated atomically: preds is shared when callers predict
// several bins in parallel.
for( q=0; q<numOfTreesInBin; q++){
#pragma omp atomic update
++preds[bin[currNode[q]].returnClass()];
}
}
// Multi-feature splits (Q == vector<int>): the split value is the sum of
// the listed features.
inline void predictBinObservation(int observationNum, std::vector<int>& preds, identity<std::vector<int> >){
std::vector<int> currNode(numOfTreesInBin);
int numberNotInLeaf;
T featureVal;
int q;
for( q=0; q<numOfTreesInBin; ++q){
currNode[q] = q+fpSingleton::getSingleton().returnNumClasses();
__builtin_prefetch(&bin[currNode[q]], 0, 3);
}
do{
numberNotInLeaf = 0;
for( q=0; q<numOfTreesInBin; ++q){
if(bin[currNode[q]].isInternalNodeFront()){
featureVal = 0;
for(auto i : bin[currNode[q]].returnFeatureNumber()){
featureVal += fpSingleton::getSingleton().returnTestFeatureVal(i,observationNum);
}
currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal);
__builtin_prefetch(&bin[currNode[q]], 0, 3);
++numberNotInLeaf;
}
}
}while(numberNotInLeaf);
for( q=0; q<numOfTreesInBin; q++){
#pragma omp atomic update
++preds[bin[currNode[q]].returnClass()];
}
}
// Weighted-feature splits (Q == weightedFeature): split value is the
// weighted sum of the listed features.
inline void predictBinObservation(int observationNum, std::vector<int>& preds, identity<weightedFeature>){
std::vector<int> currNode(numOfTreesInBin);
int numberNotInLeaf;
T featureVal;
int weightNum;
int q;
for( q=0; q<numOfTreesInBin; ++q){
currNode[q] = q+fpSingleton::getSingleton().returnNumClasses();
__builtin_prefetch(&bin[currNode[q]], 0, 3);
}
do{
numberNotInLeaf = 0;
for( q=0; q<numOfTreesInBin; ++q){
if(bin[currNode[q]].isInternalNodeFront()){
featureVal = 0;
weightNum = 0;
for(auto i : bin[currNode[q]].returnFeatureNumber().returnFeatures()){
featureVal += fpSingleton::getSingleton().returnTestFeatureVal(i,observationNum)*bin[currNode[q]].returnFeatureNumber().returnWeights()[weightNum++];
}
currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal);
__builtin_prefetch(&bin[currNode[q]], 0, 3);
++numberNotInLeaf;
}
}
}while(numberNotInLeaf);
for( q=0; q<numOfTreesInBin; q++){
#pragma omp atomic update
++preds[bin[currNode[q]].returnClass()];
}
}
// As above, but the observation is supplied directly as a feature vector
// instead of being fetched from the singleton's test set.
inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds,identity<int> ){
std::vector<int> currNode(numOfTreesInBin);
int numberNotInLeaf;
int featureNum;
int q;
for( q=0; q<numOfTreesInBin; ++q){
currNode[q] = q+fpSingleton::getSingleton().returnNumClasses();
__builtin_prefetch(&bin[currNode[q]], 0, 3);
}
do{
numberNotInLeaf = 0;
for( q=0; q<numOfTreesInBin; ++q){
if(bin[currNode[q]].isInternalNodeFront()){
featureNum = bin[currNode[q]].returnFeatureNumber();
currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(observation[featureNum]);
__builtin_prefetch(&bin[currNode[q]], 0, 3);
++numberNotInLeaf;
}
}
}while(numberNotInLeaf);
for( q=0; q<numOfTreesInBin; q++){
#pragma omp atomic update
++preds[bin[currNode[q]].returnClass()];
}
}
inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds, identity<std::vector<int> >){
std::vector<int> currNode(numOfTreesInBin);
int numberNotInLeaf;
T featureVal;
int q;
for( q=0; q<numOfTreesInBin; ++q){
currNode[q] = q+fpSingleton::getSingleton().returnNumClasses();
__builtin_prefetch(&bin[currNode[q]], 0, 3);
}
do{
numberNotInLeaf = 0;
for( q=0; q<numOfTreesInBin; ++q){
if(bin[currNode[q]].isInternalNodeFront()){
featureVal = 0;
for(auto i : bin[currNode[q]].returnFeatureNumber()){
featureVal +=observation[i];
}
currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal);
__builtin_prefetch(&bin[currNode[q]], 0, 3);
++numberNotInLeaf;
}
}
}while(numberNotInLeaf);
for( q=0; q<numOfTreesInBin; q++){
#pragma omp atomic update
++preds[bin[currNode[q]].returnClass()];
}
}
//Prediction function for ternary sparse matrix
inline void predictBinObservation(std::vector<T>& observation, std::vector<int>& preds, identity<weightedFeature>){
std::vector<int> currNode(numOfTreesInBin);
int numberNotInLeaf;
T featureVal;
int weightNum;
int q;
for( q=0; q<numOfTreesInBin; ++q){
currNode[q] = q+fpSingleton::getSingleton().returnNumClasses();
__builtin_prefetch(&bin[currNode[q]], 0, 3);
}
do{
numberNotInLeaf = 0;
for( q=0; q<numOfTreesInBin; ++q){
if(bin[currNode[q]].isInternalNodeFront()){
featureVal = 0;
weightNum = 0;
for(auto i : bin[currNode[q]].returnFeatureNumber().returnFeatures()){
featureVal +=observation[i]*bin[currNode[q]].returnFeatureNumber().returnWeights()[weightNum++];
}
currNode[q] = bin[currNode[q]].fpBaseNode<T, Q>::nextNode(featureVal);
__builtin_prefetch(&bin[currNode[q]], 0, 3);
++numberNotInLeaf;
}
}
}while(numberNotInLeaf);
for( q=0; q<numOfTreesInBin; q++){
#pragma omp atomic update
++preds[bin[currNode[q]].returnClass()];
}
}
///////////////////////////////////
/// Test Functions not to be used in production
//////////////////////////////////
inline std::vector< fpBaseNode<T,Q> >& exposeBinTest(){
return bin;
}
void printBin(){
std::cout << "\n";
for(auto nd : bin){
nd.printNode();
}
}
};
}//fp
#endif //binStruct_h
|
elect_energy_vec.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <immintrin.h>
#include <limits.h>
/*
 * Computes the electrostatic self-energy of a size^3 grid of point charges
 * with AVX intrinsics: for every charge, accumulate q_i*q_j/r_ij over all
 * other charges (pairs closer than the cutoff are masked out, which also
 * drops the i==j singularity and the zero-padded tail lanes).
 *
 * Fixes vs. the original:
 *  - printf of `v_count` (a long) used "%i" — a format/argument mismatch,
 *    which is undefined behavior; now "%ld".
 *  - allocation results are checked before use.
 *  - all heap buffers are freed before exit.
 *  - the ns-resolution timer total is kept in a double (float has too few
 *    mantissa bits for ~1e9-scale nanosecond counts).
 *  - the 32 unrolled broadcast statements and the four 8-argument
 *    _mm256_set_ps calls are collapsed into loops / aligned loads
 *    (tmp_vec rows are 32-byte aligned), with identical lane order.
 */
int main(int argc, char **argv) {
    struct timespec ts_start, ts_end;
    int size = 60;
    int n_charges = size*size*size;
    float scale = 0.5;
    float *charge, *x, *y, *z;
    int i, j, k;
    double time_total;

    __m256 *X, *Y, *Z, *Q;
    __m256 tmpQ[8], tmpX[8], tmpY[8], tmpZ[8];

    /* One float per charge plus 8 floats of zero padding for the last,
       partially filled vector. */
    X = aligned_alloc(32, (n_charges+8) * sizeof(float));
    Y = aligned_alloc(32, (n_charges+8) * sizeof(float));
    Z = aligned_alloc(32, (n_charges+8) * sizeof(float));
    Q = aligned_alloc(32, (n_charges+8) * sizeof(float));
    charge = malloc(n_charges*sizeof(float));
    x = malloc(n_charges*sizeof(float));
    y = malloc(n_charges*sizeof(float));
    z = malloc(n_charges*sizeof(float));
    if (!X || !Y || !Z || !Q || !charge || !x || !y || !z) {
        fprintf(stderr, "allocation failure\n");
        return 1;
    }

    /* initialize x,y,z coordinates and charges */
    int n = 0;
    for (i=0; i<size; i++)
        for (j=0; j<size; j++)
            for (k=0; k<size; k++) {
                x[n] = i*scale;
                y[n] = j*scale;
                z[n] = k*scale;
                charge[n] = 0.33;
                n++;
            }

    /* Initialize X,Y,Z,Q arrays with 256-bit long vectors */
    int ix, iy, iz;
    float tmp_vec[4][8] __attribute__ ((aligned (32)));
    long v_element_count = 0;
    long v_count = 0;
    memset(tmp_vec, 0, sizeof tmp_vec);  /* so a short first batch is padded too */
    for (ix=0; ix<size; ix++)
        for (iy=0; iy<size; iy++)
            for (iz=0; iz<size; iz++)
            {
                tmp_vec[0][v_element_count] = ix*scale;
                tmp_vec[1][v_element_count] = iy*scale;
                tmp_vec[2][v_element_count] = iz*scale;
                tmp_vec[3][v_element_count] = 0.33;
                v_element_count++;
                if ( v_element_count == 8 ) {
                    /* tmp_vec rows are 32-byte aligned, so an aligned load
                       replaces _mm256_set_ps(v[7],...,v[0]) with the same
                       lane order (element 0 -> lane 0). */
                    X[v_count] = _mm256_load_ps(tmp_vec[0]);
                    Y[v_count] = _mm256_load_ps(tmp_vec[1]);
                    Z[v_count] = _mm256_load_ps(tmp_vec[2]);
                    Q[v_count] = _mm256_load_ps(tmp_vec[3]);
                    v_count++;
                    v_element_count = 0;
                    memset(tmp_vec, 0, 32*sizeof(float));
                }
            }
    /* Treat the remainder. Pad with 0's (padding lanes have q=0 and are
       also masked by the cutoff test, so they contribute nothing). */
    if ( v_element_count != 0 ) {
        X[v_count] = _mm256_load_ps(tmp_vec[0]);
        Y[v_count] = _mm256_load_ps(tmp_vec[1]);
        Z[v_count] = _mm256_load_ps(tmp_vec[2]);
        Q[v_count] = _mm256_load_ps(tmp_vec[3]);
        v_count++;
    }

    double VC = 0;
    __m256 r_vec, result, vcps, dist_check, R_CUTOFF;
    __m256 diff[8];  /* only [0..2] used (dx,dy,dz) */
    R_CUTOFF = _mm256_set1_ps(scale*scale);
    float tmp_add[4][8] __attribute__ ((aligned (32)));
    int m;

    clock_gettime(CLOCK_MONOTONIC, &ts_start);
    #pragma omp parallel for private(tmpQ,tmpX,tmpY,tmpZ,i,j,m,diff,r_vec,vcps,tmp_add,result,dist_check) reduction(+:VC) schedule(dynamic)
    for (i=0; i<v_count; i++) {
        /* Broadcast each of the 8 lanes of vector i into its own register
           (replaces 32 unrolled statements). */
        const float *qi = (const float *)&Q[i];
        const float *xi = (const float *)&X[i];
        const float *yi = (const float *)&Y[i];
        const float *zi = (const float *)&Z[i];
        for (m=0; m<8; m++) {
            tmpQ[m] = _mm256_broadcast_ss(&qi[m]);
            tmpX[m] = _mm256_broadcast_ss(&xi[m]);
            tmpY[m] = _mm256_broadcast_ss(&yi[m]);
            tmpZ[m] = _mm256_broadcast_ss(&zi[m]);
        }
        for (m=0; m<8; m++)
        {
            for (j=0; j<v_count; j++)
            {
                vcps = _mm256_setzero_ps();
                diff[0] = _mm256_sub_ps(tmpX[m],X[j]);
                diff[1] = _mm256_sub_ps(tmpY[m],Y[j]);
                diff[2] = _mm256_sub_ps(tmpZ[m],Z[j]);
                diff[0] = _mm256_mul_ps(diff[0],diff[0]);
                diff[1] = _mm256_mul_ps(diff[1],diff[1]);
                diff[2] = _mm256_mul_ps(diff[2],diff[2]);
                r_vec = _mm256_add_ps(diff[0],diff[1]);
                r_vec = _mm256_add_ps(r_vec,diff[2]);
                /* predicate 29 == _CMP_GE_OQ: keep pairs at/above the cutoff,
                   zeroing self-interaction and padding lanes */
                dist_check = _mm256_cmp_ps(r_vec,R_CUTOFF,29);
                r_vec = _mm256_rsqrt_ps(r_vec);
                result = _mm256_mul_ps(tmpQ[m],Q[j]);
                result = _mm256_mul_ps(result,r_vec);
                result = _mm256_and_ps(dist_check,result);
                vcps = _mm256_add_ps(vcps,result); // Accumulate coupling between all voxels
                _mm256_store_ps(tmp_add[0],vcps);
                VC += tmp_add[0][0] + tmp_add[0][1] + tmp_add[0][2] + tmp_add[0][3] + tmp_add[0][4] + tmp_add[0][5] + tmp_add[0][6] + tmp_add[0][7];
            }
        }
    }

    // Calculate electrostatic energy: sum of charge[i]*charge[j]/dist[i,j] */
    /* float dx, dy, dz, dist;
    double Energy=0.0f;
    #pragma omp parallel for private(j,dx,dy,dz,dist,n) reduction(+:Energy) schedule(static,50)
    for (i = 0; i < n_charges; i++) {
        for (j = i+1; j < n_charges; j++) {
            dx = x[i]-x[j];
            dy = y[i]-y[j];
            dz = z[i]-z[j];
            dist=sqrt(dx*dx + dy*dy + dz*dz);
            Energy += charge[i]*charge[j]/dist;
        }
    }
    */

    clock_gettime(CLOCK_MONOTONIC, &ts_end);
    time_total = (ts_end.tv_sec - ts_start.tv_sec)*1e9 + (ts_end.tv_nsec - ts_start.tv_nsec);
    printf("\nTotal time is %f ms, Energy is %f %f\n", time_total/1000000, VC, VC*0.5);
    /* Fixed: v_count is long, so the conversion must be %ld, not %i. */
    printf("%ld\n", v_count);

    free(charge);
    free(x);
    free(y);
    free(z);
    free(X);
    free(Y);
    free(Z);
    free(Q);
    return 0;
}
|
par_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterp
*--------------------------------------------------------------------------*/
/**
 * hypre_BoomerAMGBuildInterp
 *
 * Builds the classical (Ruge-Stueben style) interpolation operator P for
 * BoomerAMG from the matrix A and the strength matrix S.
 *
 * C-points interpolate by identity; each F-point interpolates from the
 * C-points that strongly influence it (entries of S), with connections to
 * strong F-neighbors distributed onto common C-points and weak connections
 * lumped into the diagonal.
 *
 * Parameters:
 *   A               - fine-grid ParCSR matrix.
 *   CF_marker       - per-row C/F flags: >= 0 means C-point, < 0 F-point;
 *                     the special value -3 marks points excluded from
 *                     interpolation (reset to -1 on exit).
 *   S               - strength-of-connection matrix (pattern only; its data
 *                     arrays are never read here).
 *   num_cpts_global - global coarse-point partition info (meaning depends on
 *                     HYPRE_NO_GLOBAL_PARTITION, see below).
 *   num_functions   - number of unknowns per node; dof_func maps row -> function.
 *   dof_func        - function index per fine point (used when num_functions > 1).
 *   debug_flag      - 4 enables timing printout; negative enables the
 *                     zero-diagonal warning (print_level = 1).
 *   trunc_factor    - drop tolerance for P truncation (0.0 = no truncation).
 *   max_elmts       - max nonzeros kept per row of P (0 = unlimited).
 *   col_offd_S_to_A - optional map from S's off-diag columns to A's off-diag
 *                     columns (NULL if they coincide).
 *   P_ptr           - output: the assembled interpolation matrix.
 *
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_BoomerAMGBuildInterp( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_BigInt *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
//HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int kc;
HYPRE_BigInt big_k;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int print_level = 0;
HYPRE_Int *int_buf_data;
/* [col_1, col_n) is this rank's global row/column ownership range. */
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
/* Determine the total number of global coarse points. With assumed
* partition, only the last rank holds the global count and broadcasts it;
* otherwise it is the last entry of the explicit partition array. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
//my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
/* A negative debug_flag doubles as a request to print zero-diagonal
* warnings; its magnitude is the actual debug level. */
if (debug_flag < 0)
{
debug_flag = -debug_flag;
print_level = 1;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_A_offd)
{
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
}
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_HOST);
/* Exchange CF_marker (and dof_func when multi-function) for the
* off-processor columns of A via the matvec communication pattern. */
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of A
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
/* Compact A_ext in place, rewriting global column indices:
* - columns owned locally become local indices in [0, n_fine)
* - columns found in col_map_offd are encoded as the negative value
*   (-kc - 1), so sign distinguishes diag from offd later on
* - columns in neither set are dropped entirely.
* (When num_procs == 1, num_cols_A_offd is 0 and these loops are no-ops,
* so the NULL A_ext_* pointers are never dereferenced.) */
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
big_k = A_ext_j[j];
if (big_k >= col_1 && big_k < col_n)
{
A_ext_j[index] = big_k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
/* Shift row pointers right by one so A_ext_i is a valid CSR row-start
* array again after the in-place compaction above. */
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
/* Each thread j counts nonzeros for a contiguous row range [ns, ne);
* the first `rest` threads take one extra row to cover the remainder. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
/* Prefix-sum the per-thread counters so each thread later knows its
* starting offset into the P arrays; the last entries hold the totals. */
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
/* Convert the per-thread coarse numbering into a globally consistent
* local numbering by shifting each thread's range by the count of
* coarse points assigned by all preceding threads. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
fine_to_coarse[i] += coarse_shift;
}
//fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; */
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
/* Second pass: fill P. Threads use the same row partition as pass one
* and start writing at the offsets computed by the prefix sums above. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
/* P_marker maps a column index to its slot in the current row of P
* (>= jj_begin_row), or holds strong_f_marker / -1 sentinels. */
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
/* NOTE(review): assumes the diagonal entry is stored first in each
* CSR row of A_diag — standard hypre convention; confirm upstream. */
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
/* sgn tracks the sign of i1's diagonal: only connections of
* opposite sign to the diagonal are summed/distributed. */
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
else
{
/* Nothing to distribute to: lump the connection into the
* diagonal instead (same-function unknowns only). */
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
diagonal += A_diag_data[jj];
}
}
}
/*--------------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
diagonal += A_diag_data[jj];
}
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
/* A_ext_j was remapped above: i2 >= 0 is a local (diag)
* column; i2 < 0 encodes off-diag column (-i2 - 1). */
i2 = (HYPRE_Int)A_ext_j[jj1];
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
diagonal += A_offd_data[jj];
}
}
}
/*-----------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
diagonal += A_offd_data[jj];
}
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
if (diagonal == 0.0)
{
if (print_level)
{
hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
}
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] = 0.0;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] = 0.0;
}
}
else
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
/* Use a fresh strong-F sentinel for each row so stale marks from the
* previous row can never be mistaken for this row's strong F-points,
* without having to reinitialize the marker arrays. */
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
/* Hand the assembled CSR arrays to P (P now owns them). */
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
/* Truncation may reallocate; re-fetch the array pointers. */
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* Compress P_offd's column space: keep only columns actually used,
* build tmp_map_offd (old index per new column), and renumber
* P_offd_j into the compressed numbering via binary search. */
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
{
P_marker[i] = 0;
}
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
/* Restore the -3 markers (excluded points) to plain F-points. */
for (i=0; i < n_fine; i++)
{
if (CF_marker[i] == -3) CF_marker[i] = -1;
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
/* Builds P's comm pkg and fills col_map_offd_P with global indices. */
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
//hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpHE
* interpolation routine for hyperbolic PDEs
* treats weak fine connections like strong fine connections
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpHE( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_BigInt *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
//HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int kc;
HYPRE_BigInt big_k;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
//my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of A
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
big_k = A_ext_j[j];
if (big_k >= col_1 && big_k < col_n)
{
A_ext_j[index] = big_k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += coarse_shift;
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and influences i,
* distribute a_{i,i1} to C-points that strongly influence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A,fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(A_ext);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildDirInterpHost
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildDirInterpHost( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Real diagonal;
HYPRE_Real sum_N_pos, sum_P_pos;
HYPRE_Real sum_N_neg, sum_P_neg;
HYPRE_Real alfa = 1.0;
HYPRE_Real beta = 1.0;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
//my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] > 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
fine_to_coarse[i] += coarse_shift;
}
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd,sum_P_pos,sum_P_neg,sum_N_pos,sum_N_neg,alfa,beta) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
HYPRE_Int *P_marker, *P_marker_offd;
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
sum_N_pos = 0;
sum_N_neg = 0;
sum_P_pos = 0;
sum_P_neg = 0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if (num_functions == 1 || dof_func[i1] == dof_func[i])
{
if (A_diag_data[jj] > 0)
sum_N_pos += A_diag_data[jj];
else
sum_N_neg += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
if (A_diag_data[jj] > 0)
sum_P_pos += A_diag_data[jj];
else
sum_P_neg += A_diag_data[jj];
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])
{
if (A_offd_data[jj] > 0)
sum_N_pos += A_offd_data[jj];
else
sum_N_neg += A_offd_data[jj];
}
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
if (A_offd_data[jj] > 0)
sum_P_pos += A_offd_data[jj];
else
sum_P_neg += A_offd_data[jj];
}
}
}
if (sum_P_neg) alfa = sum_N_neg/sum_P_neg/diagonal;
if (sum_P_pos) beta = sum_N_pos/sum_P_pos/diagonal;
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
if (P_diag_data[jj]> 0)
P_diag_data[jj] *= -beta;
else
P_diag_data[jj] *= -alfa;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
if (P_offd_data[jj]> 0)
P_offd_data[jj] *= -beta;
else
P_offd_data[jj] *= -alfa;
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
HYPRE_Int *P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBuildDirInterp
 *
 * Dispatch wrapper for building the direct-interpolation operator P.
 * When CUDA support is compiled in and matrix A resides on the device,
 * the device kernel is invoked (wrapped in an NVTX profiling range);
 * otherwise the host implementation is used.  Returns the callee's
 * error code.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildDirInterp( hypre_ParCSRMatrix   *A,
                               HYPRE_Int            *CF_marker,
                               hypre_ParCSRMatrix   *S,
                               HYPRE_BigInt         *num_cpts_global,
                               HYPRE_Int             num_functions,
                               HYPRE_Int            *dof_func,
                               HYPRE_Int             debug_flag,
                               HYPRE_Real            trunc_factor,
                               HYPRE_Int             max_elmts,
                               HYPRE_Int            *col_offd_S_to_A,
                               HYPRE_Int             interp_type,
                               hypre_ParCSRMatrix  **P_ptr)
{
   HYPRE_Int ret = 0;

#if defined(HYPRE_USING_CUDA)
   hypre_NvtxPushRange("DirInterp");
   /* Choose execution path from where A's data actually lives. */
   if (hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ) == HYPRE_EXEC_DEVICE)
   {
      ret = hypre_BoomerAMGBuildDirInterpDevice(A, CF_marker, S, num_cpts_global,
                                                num_functions, dof_func, debug_flag,
                                                trunc_factor, max_elmts,
                                                col_offd_S_to_A, interp_type, P_ptr);
   }
   else
#endif
   {
      ret = hypre_BoomerAMGBuildDirInterpHost(A, CF_marker, S, num_cpts_global,
                                              num_functions, dof_func, debug_flag,
                                              trunc_factor, max_elmts,
                                              col_offd_S_to_A, P_ptr);
   }
#if defined(HYPRE_USING_CUDA)
   hypre_NvtxPopRange();
#endif

   return ret;
}
/*------------------------------------------------
 * Drop entries in interpolation matrix P
 * max_elmts == 0 means no limit on rownnz
 *------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGInterpTruncation( hypre_ParCSRMatrix *P,
                                 HYPRE_Real          trunc_factor,
                                 HYPRE_Int           max_elmts)
{
   /* Nothing requested: no threshold dropping and no row-length cap. */
   if (trunc_factor <= 0.0 && max_elmts == 0)
   {
      return 0;
   }

#if defined(HYPRE_USING_CUDA)
   /* If P lives on the device, truncate there and return directly. */
   if (hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(P) ) == HYPRE_EXEC_DEVICE)
   {
      return hypre_BoomerAMGInterpTruncationDevice(P, trunc_factor, max_elmts);
   }
#endif

   /* Host path: rescale each row after dropping, and use the infinity
      norm of the row (nrm_type 0) for threshold dropping. */
   return hypre_ParCSRMatrixTruncate(P, trunc_factor, max_elmts,
                                     /* rescale  */ 1,
                                     /* nrm_type */ 0);
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpModUnk - this is a modified interpolation for the unknown approach.
 * here we need to pass in a strength matrix built on the entire matrix.
 *
 * Builds the interpolation operator *P_ptr by classical interpolation,
 * modified so that an F-point's strong-F connections are distributed only
 * to neighbors of the SAME function type (dof_func), and weak connections
 * are folded into the diagonal only for matching function types.
 * The result is optionally truncated via trunc_factor/max_elmts.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpModUnk( hypre_ParCSRMatrix  *A,
                                  HYPRE_Int           *CF_marker,
                                  hypre_ParCSRMatrix  *S,
                                  HYPRE_BigInt        *num_cpts_global,
                                  HYPRE_Int            num_functions,
                                  HYPRE_Int           *dof_func,
                                  HYPRE_Int            debug_flag,
                                  HYPRE_Real           trunc_factor,
                                  HYPRE_Int            max_elmts,
                                  HYPRE_Int           *col_offd_S_to_A,
                                  hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt    *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt    *col_map_offd_P;
   HYPRE_Int       *tmp_map_offd;
   HYPRE_Int       *CF_marker_offd = NULL;   /* CF_marker for ghost columns */
   HYPRE_Int       *dof_func_offd = NULL;    /* dof_func for ghost columns */
   hypre_CSRMatrix *A_ext;                   /* ghost rows of A (num_procs > 1) */
   HYPRE_Real      *A_ext_data = NULL;
   HYPRE_Int       *A_ext_i = NULL;
   HYPRE_BigInt    *A_ext_j = NULL;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real      *P_diag_data;
   HYPRE_Int       *P_diag_i;
   HYPRE_Int       *P_diag_j;
   HYPRE_Real      *P_offd_data;
   HYPRE_Int       *P_offd_i;
   HYPRE_Int       *P_offd_j;
   HYPRE_Int        P_diag_size, P_offd_size;
   HYPRE_Int       *P_marker, *P_marker_offd;
   HYPRE_Int        jj_counter,jj_counter_offd;
   HYPRE_Int       *jj_count, *jj_count_offd;
   HYPRE_Int        jj_begin_row,jj_begin_row_offd;
   HYPRE_Int        jj_end_row,jj_end_row_offd;
   HYPRE_Int        start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        strong_f_marker;
   HYPRE_Int       *fine_to_coarse;
   //HYPRE_Int *fine_to_coarse_offd;
   HYPRE_Int       *coarse_counter;          /* per-thread coarse-point counts */
   HYPRE_Int        coarse_shift;
   HYPRE_BigInt     total_global_cpts;
   HYPRE_Int        num_cols_P_offd;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_Int        i,i1,i2;
   HYPRE_Int        j,jl,jj,jj1;
   HYPRE_Int        kc;
   HYPRE_BigInt     big_k;
   HYPRE_Int        start;
   HYPRE_Int        sgn;
   HYPRE_Int        c_num;
   HYPRE_Real       diagonal;
   HYPRE_Real       sum;
   HYPRE_Real       distribute;
   HYPRE_Real       zero = 0.0;
   HYPRE_Real       one = 1.0;
   HYPRE_Int        my_id;
   HYPRE_Int        num_procs;
   HYPRE_Int        num_threads;
   HYPRE_Int        num_sends;
   HYPRE_Int        index;
   HYPRE_Int        ns, ne, size, rest;
   HYPRE_Int        print_level = 0;
   HYPRE_Int       *int_buf_data;
   HYPRE_BigInt     col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int        local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt     col_n = col_1 + local_numrows;  /* one past last local column */
   HYPRE_Real       wall_time;  /* for debugging instrumentation  */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

#ifdef HYPRE_NO_GLOBAL_PARTITION
   //my_first_cpt = num_cpts_global[0];
   /* The last rank holds the global C-point count; broadcast to all. */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   /* Negative debug_flag additionally enables the zero-diagonal warning. */
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
      print_level = 1;
   }
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_A_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                           num_sends), HYPRE_MEMORY_HOST);
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /* For systems (num_functions > 1) also exchange the dof_func values. */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of A
    *---------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   if (num_procs > 1)
   {
      A_ext      = hypre_ParCSRMatrixExtractBExt(A,A,1);
      A_ext_i    = hypre_CSRMatrixI(A_ext);
      A_ext_j    = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
   }
   /* Compress A_ext in place: local columns become local (nonnegative)
    * indices; off-diagonal columns are encoded as -kc-1, where kc is the
    * position in col_map_offd.  Columns found in neither are dropped. */
   index = 0;
   for (i=0; i < num_cols_A_offd; i++)
   {
      for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
      {
         big_k = A_ext_j[j];
         if (big_k >= col_1 && big_k < col_n)
         {
            A_ext_j[index] = big_k - col_1;
            A_ext_data[index++] = A_ext_data[j];
         }
         else
         {
            kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
            if (kc > -1)
            {
               A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
               A_ext_data[index++] = A_ext_data[j];
            }
         }
      }
      A_ext_i[i] = index;
   }
   /* Shift A_ext_i back into row-pointer form after the in-place pass. */
   for (i = num_cols_A_offd; i > 0; i--)
      A_ext_i[i] = A_ext_i[i-1];
   if (num_procs > 1) A_ext_i[0] = 0;
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d  Interp: Comm 2   Get A_ext =  %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   /* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* Partition rows [0,n_fine) evenly; the first `rest` threads get one
       * extra row each. */
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i.
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }
            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];
                     if (CF_marker_offd[i1] >= 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];
                     if (CF_marker_offd[i1] >= 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate  arrays.
    *-----------------------------------------------------------------------*/
   /* Turn the per-thread counts into inclusive prefix sums, so that
    * counter[j-1] is thread j's starting offset. */
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];
   P_diag_size = jj_counter;
   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_i[n_fine] = jj_counter;
   P_offd_size = jj_counter_offd;
   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   /* Convert per-thread-local coarse indices to process-local ones by
    * adding the preceding threads' coarse counts. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
         fine_to_coarse[i] += coarse_shift;
   }
   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   /*#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   #endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      /* Start writing P at this thread's offset from the first pass. */
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;
      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }
      strong_f_marker = -2;
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter]    = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
               /*--------------------------------------------------------------
                * If neighbor i1 is an F-point, mark it as a strong F-point
                * whose connection needs to be distributed.
                *--------------------------------------------------------------*/
               else if (CF_marker[i1] != -3)
               {
                  P_marker[i1] = strong_f_marker;
               }
            }
            jj_end_row = jj_counter;
            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;
            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];
                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/
                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        /*P_offd_j[jj_counter_offd]  = fine_to_coarse_offd[i1];*/
                        P_offd_j[jj_counter_offd]  = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                     /*-----------------------------------------------------------
                      * If neighbor i1 is an F-point, mark it as a strong F-point
                      * whose connection needs to be distributed.
                      *-----------------------------------------------------------*/
                     else if (CF_marker_offd[i1] != -3)
                     {
                        P_marker_offd[i1] = strong_f_marker;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];
                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/
                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        /*P_offd_j[jj_counter_offd]  = fine_to_coarse_offd[i1];*/
                        P_offd_j[jj_counter_offd]  = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                     /*-----------------------------------------------------------
                      * If neighbor i1 is an F-point, mark it as a strong F-point
                      * whose connection needs to be distributed.
                      *-----------------------------------------------------------*/
                     else if (CF_marker_offd[i1] != -3)
                     {
                        P_marker_offd[i1] = strong_f_marker;
                     }
                  }
               }
            }
            jj_end_row_offd = jj_counter_offd;
            /* Row i's diagonal entry is stored first in A_diag. */
            diagonal = A_diag_data[A_diag_i[i]];
            /* Loop over ith row of A.  First, the diagonal part of A */
            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];
               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/
               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
               }
               /*--------------------------------------------------------------
                * Case 2: neighbor i1 is an F-point and strongly influences i,
                * distribute a_{i,i1} to C-points that strongly influence i.
                * Note: currently no distribution to the diagonal in this case.
                * HERE, we only want to distribute to points of the SAME
                * function type.
                *--------------------------------------------------------------*/
               else if (P_marker[i1] == strong_f_marker)
               {
                  sum = zero;
                  /*-----------------------------------------------------------
                   * Loop over row of A for point i1 and calculate the sum
                   * of the connections to c-points that strongly influence i.
                   *-----------------------------------------------------------*/
                  /* Only entries whose sign opposes i1's diagonal take part. */
                  sgn = 1;
                  if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
                  /* Diagonal block part of row i1 */
                  for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                     {
                        if (P_marker[i2] >= jj_begin_row &&
                            (sgn*A_diag_data[jj1]) < 0 )
                        {
                           sum += A_diag_data[jj1];
                        }
                     }
                  }
                  /* Off-Diagonal block part of row i1 */
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                        {
                           if (P_marker_offd[i2] >= jj_begin_row_offd
                               && (sgn*A_offd_data[jj1]) < 0)
                           {
                              sum += A_offd_data[jj1];
                           }
                        }
                     }
                  }
                  if (sum != 0)
                  {
                     distribute = A_diag_data[jj] / sum;
                     /*-----------------------------------------------------------
                      * Loop over row of A for point i1 and do the distribution.
                      *-----------------------------------------------------------*/
                     /* Diagonal block part of row i1 */
                     for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                     {
                        i2 = A_diag_j[jj1];
                        if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                        {
                           if (P_marker[i2] >= jj_begin_row
                               && (sgn*A_diag_data[jj1]) < 0)
                           {
                              P_diag_data[P_marker[i2]]
                                 += distribute * A_diag_data[jj1];
                           }
                        }
                     }
                     /* Off-Diagonal block part of row i1 */
                     if (num_procs > 1)
                     {
                        for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                        {
                           i2 = A_offd_j[jj1];
                           if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                           {
                              if (P_marker_offd[i2] >= jj_begin_row_offd
                                  && (sgn*A_offd_data[jj1]) < 0)
                              {
                                 P_offd_data[P_marker_offd[i2]]
                                    += distribute * A_offd_data[jj1];
                              }
                           }
                        }
                     }
                  }
                  else /* sum = 0 - only add to diag if the same function type */
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func[i1])
                        diagonal += A_diag_data[jj];
                  }
               }
               /*--------------------------------------------------------------
                * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                * into the diagonal. (only if the same function type)
                *--------------------------------------------------------------*/
               else if (CF_marker[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func[i1])
                     diagonal += A_diag_data[jj];
               }
            }
            /*----------------------------------------------------------------
             * Still looping over ith row of A. Next, loop over the
             * off-diagonal part of A
             *---------------------------------------------------------------*/
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/
                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                  }
                  /*------------------------------------------------------------
                   * Case 2: neighbor i1 is an F-point and strongly influences i,
                   * distribute a_{i,i1} to C-points that strongly influence i.
                   * Note: currently no distribution to the diagonal in this case.
                   * AGAIN, we only want to distribute to points of the SAME
                   * function type.
                   *-----------------------------------------------------------*/
                  else if (P_marker_offd[i1] == strong_f_marker)
                  {
                     sum = zero;
                     /*---------------------------------------------------------
                      * Loop over row of A_ext for point i1 and calculate the sum
                      * of the connections to c-points that strongly influence i.
                      *---------------------------------------------------------*/
                     /* find row number */
                     c_num = A_offd_j[jj];
                     sgn = 1;
                     if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
                     for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                     {
                        /* i2 >= 0 is a local column; i2 < 0 encodes off-diag
                         * column -i2-1 (see A_ext compression above). */
                        i2 = (HYPRE_Int)A_ext_j[jj1];
                        if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                        {
                           if (i2 > -1)
                           {
                              /* in the diagonal block */
                              if (P_marker[i2] >= jj_begin_row
                                  && (sgn*A_ext_data[jj1]) < 0)
                              {
                                 sum += A_ext_data[jj1];
                              }
                           }
                           else
                           {
                              /* in the off_diagonal block  */
                              if (P_marker_offd[-i2-1] >= jj_begin_row_offd
                                  && (sgn*A_ext_data[jj1]) < 0)
                              {
                                 sum += A_ext_data[jj1];
                              }
                           }
                        }
                     }
                     if (sum != 0)
                     {
                        distribute = A_offd_data[jj] / sum;
                        /*---------------------------------------------------------
                         * Loop over row of A_ext for point i1 and do
                         * the distribution.
                         *--------------------------------------------------------*/
                        /* Diagonal block part of row i1 */
                        for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                        {
                           i2 = (HYPRE_Int)A_ext_j[jj1];
                           if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                           {
                              if (i2 > -1) /* in the diagonal block */
                              {
                                 if (P_marker[i2] >= jj_begin_row
                                     && (sgn*A_ext_data[jj1]) < 0)
                                 {
                                    P_diag_data[P_marker[i2]]
                                       += distribute * A_ext_data[jj1];
                                 }
                              }
                              else
                              {
                                 /* in the off_diagonal block  */
                                 if (P_marker_offd[-i2-1] >= jj_begin_row_offd
                                     && (sgn*A_ext_data[jj1]) < 0)
                                    P_offd_data[P_marker_offd[-i2-1]]
                                       += distribute * A_ext_data[jj1];
                              }
                           }
                        }
                     }
                     else /* sum = 0 */
                     {
                        if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                           diagonal += A_offd_data[jj];
                     }
                  }
                  /*-----------------------------------------------------------
                   * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                   * into the diagonal.
                   *-----------------------------------------------------------*/
                  else if (CF_marker_offd[i1] != -3)
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                        diagonal += A_offd_data[jj];
                  }
               }
            }
            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/
            if (diagonal == 0.0)
            {
               if (print_level)
                  hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] = 0.0;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] = 0.0;
               }
            }
            else
            {
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] /= -diagonal;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] /= -diagonal;
               }
            }
         }
         /* Fresh marker value per row, so stale strong-F marks from earlier
          * rows are never misread. */
         strong_f_marker--;
         /* NOTE(review): P_offd_i[ns] for thread jl is produced as
          * P_offd_i[ne] by thread jl-1 (ne of jl-1 == ns of jl); for jl==0
          * it is the calloc'd 0. */
         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation may reallocate the CSR arrays; refresh local views. */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* Squeeze unused columns out of P_offd and build its column map. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;
      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }
   /* Restore special F-points (-3) to ordinary F-points (-1). */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
   *P_ptr = P;
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
   if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGTruncandBuild
 *
 * Truncates P (per trunc_factor/max_elmts), compresses P_offd's column
 * space to the columns actually referenced, installs the new off-diagonal
 * column map, and rebuilds P's matvec communication package.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGTruncandBuild( hypre_ParCSRMatrix *P,
                              HYPRE_Real          trunc_factor,
                              HYPRE_Int           max_elmts)
{
   hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_ParCSRCommPkg *commpkg_P = hypre_ParCSRMatrixCommPkg(P);
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(P);
   HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
   HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(P_offd);
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P_offd);
   HYPRE_BigInt *new_col_map_offd;   /* only allocated when P_offd_size > 0 */
   HYPRE_Int *tmp_map_offd = NULL;
   HYPRE_Int P_offd_size=0, new_num_cols_offd;
   HYPRE_Int *P_marker;              /* per-column "referenced" flags */
   HYPRE_Int i;
   HYPRE_Int index;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation may reallocate the CSR arrays; refresh local views. */
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_size = P_offd_i[n_fine];
   }
   /* NOTE(review): when P_offd_size == 0, new_num_cols_offd stays 0 and
    * the loops below are no-ops, so P_marker/new_col_map_offd are never
    * read while unset. */
   new_num_cols_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      /*#define HYPRE_SMP_PRIVATE i
      #include "../utilities/hypre_smp_forloop.h"*/
      for (i=0; i < num_cols_offd; i++)
         P_marker[i] = 0;
      /* Mark and count the off-diagonal columns still referenced. */
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            new_num_cols_offd++;
            P_marker[index] = 1;
         }
      }
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST);
      new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_num_cols_offd, HYPRE_MEMORY_HOST);
      /* tmp_map_offd[i] = old column index of the i-th surviving column. */
      index = 0;
      for (i=0; i < new_num_cols_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }
      /*#define HYPRE_SMP_PRIVATE i
      #include "../utilities/hypre_smp_forloop.h"*/
      /* Remap P_offd_j from old to compressed column indices. */
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          new_num_cols_offd);
   }
   /* Gather the global column ids of the surviving columns. */
   index = 0;
   for (i = 0; i < new_num_cols_offd; i++)
   {
      while (P_marker[index] == 0) index++;
      new_col_map_offd[i] = col_map_offd[index];
      index++;
   }
   if (P_offd_size) hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   if (new_num_cols_offd)
   {
      hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(col_map_offd, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
      hypre_CSRMatrixNumCols(P_offd) = new_num_cols_offd;
   }
   /* Rebuild the communication package against the compressed column map. */
   if (commpkg_P != NULL) hypre_MatvecCommPkgDestroy(commpkg_P);
   hypre_MatvecCommPkgCreate(P);
   return hypre_error_flag;
}
/**
 * Creates the weighted-Jacobi propagation matrix C = I - w * D^{-1} * A,
 * where D is the diagonal of A (assumed stored first in each diag row).
 *
 * Special case w == 0: for each row the weight is taken as the l1-norm of
 * that row (sum of |a_ij| over diag and offd parts), i.e. C = I - D_l1^{-1}A
 * row-scaled.
 *
 * The returned matrix shares A's row_starts (ownership flags cleared) and
 * copies A's offd column map.  Caller owns and must destroy C.
 */
hypre_ParCSRMatrix *hypre_CreateC( hypre_ParCSRMatrix *A,
                                   HYPRE_Real w)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   hypre_ParCSRMatrix *C;
   hypre_CSRMatrix *C_diag;
   hypre_CSRMatrix *C_offd;
   HYPRE_Real *C_diag_data;
   HYPRE_Int *C_diag_i;
   HYPRE_Int *C_diag_j;
   HYPRE_Real *C_offd_data;
   HYPRE_Int *C_offd_i;
   HYPRE_Int *C_offd_j;
   HYPRE_BigInt *col_map_offd_C;
   HYPRE_Int i, j, index;
   HYPRE_Real invdiag;
   HYPRE_Real w_local = w;
   /* C has the same sparsity pattern as A */
   C = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_rows, row_starts,
                                row_starts, num_cols_offd, A_diag_i[num_rows], A_offd_i[num_rows]);
   hypre_ParCSRMatrixInitialize(C);
   C_diag = hypre_ParCSRMatrixDiag(C);
   C_offd = hypre_ParCSRMatrixOffd(C);
   C_diag_i = hypre_CSRMatrixI(C_diag);
   C_diag_j = hypre_CSRMatrixJ(C_diag);
   C_diag_data = hypre_CSRMatrixData(C_diag);
   C_offd_i = hypre_CSRMatrixI(C_offd);
   C_offd_j = hypre_CSRMatrixJ(C_offd);
   C_offd_data = hypre_CSRMatrixData(C_offd);
   col_map_offd_C = hypre_ParCSRMatrixColMapOffd(C);
   /* row_starts belong to A; C must not free them */
   hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
   hypre_ParCSRMatrixOwnsColStarts(C) = 0;
   for (i=0; i < num_cols_offd; i++)
      col_map_offd_C[i] = col_map_offd_A[i];
   for (i=0; i < num_rows; i++)
   {
      /* assumes the diagonal entry a_ii is stored first in each diag row */
      index = A_diag_i[i];
      invdiag = -w/A_diag_data[index];
      C_diag_data[index] = 1.0-w;
      C_diag_j[index] = A_diag_j[index];
      if (w == 0)
      {
         /* per-row weight: l1-norm of row i over diag and offd parts */
         w_local = fabs(A_diag_data[index]);
         for (j = index+1; j < A_diag_i[i+1]; j++)
            w_local += fabs(A_diag_data[j]);
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
            w_local += fabs(A_offd_data[j]);
         invdiag = -1/w_local;
         C_diag_data[index] = 1.0-A_diag_data[index]/w_local;
      }
      C_diag_i[i] = index;
      C_offd_i[i] = A_offd_i[i];
      /* off-diagonal entries of row i: scaled copies of A's entries */
      for (j = index+1; j < A_diag_i[i+1]; j++)
      {
         C_diag_data[j] = A_diag_data[j]*invdiag;
         C_diag_j[j] = A_diag_j[j];
      }
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         C_offd_data[j] = A_offd_data[j]*invdiag;
         C_offd_j[j] = A_offd_j[j];
      }
   }
   C_diag_i[num_rows] = A_diag_i[num_rows];
   C_offd_i[num_rows] = A_offd_i[num_rows];
   return C;
}
/* RL */
/**
 * Builds a one-point interpolation operator P:
 *   - each C-point interpolates as the identity (single entry 1.0);
 *   - each F-point interpolates with weight 1.0 from the single most
 *     strongly influencing C-point in its row, i.e. the strong connection
 *     (per S) with the largest |a_ij|, searched over both the diag and
 *     offd parts of A.
 *
 * Two passes: the first counts nonzeros, builds the fine-to-coarse map and
 * records each F-point's chosen C-point ('d' = diag part, 'o' = offd part,
 * 'n' = none found); the second fills P.  Collective over comm (uses
 * point-to-point exchanges of CF_marker, dof_func and fine_to_coarse).
 *
 * Output *P_ptr is a newly created ParCSR matrix owned by the caller.
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_BoomerAMGBuildInterpOnePnt( hypre_ParCSRMatrix *A,
                                  HYPRE_Int *CF_marker,
                                  hypre_ParCSRMatrix *S,
                                  HYPRE_BigInt *num_cpts_global,
                                  HYPRE_Int num_functions,
                                  HYPRE_Int *dof_func,
                                  HYPRE_Int debug_flag,
                                  HYPRE_Int *col_offd_S_to_A,
                                  hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   //HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   /* csr's */
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   /* arrays */
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int num_cols_offd_P;
   HYPRE_Int *tmp_map_offd = NULL;
   HYPRE_BigInt *col_map_offd_P = NULL;
   /* CF marker off-diag part */
   HYPRE_Int *CF_marker_offd = NULL;
   /* func type off-diag part */
   HYPRE_Int *dof_func_offd = NULL;
   /* nnz */
   HYPRE_Int nnz_diag, nnz_offd, cnt_diag, cnt_offd;
   HYPRE_Int *marker_diag, *marker_offd = NULL;
   /* local size */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* number of C-pts */
   HYPRE_Int n_cpts = 0;
   /* fine to coarse mapping: diag part and offd part */
   HYPRE_Int *fine_to_coarse;
   HYPRE_BigInt *fine_to_coarse_offd = NULL;
   HYPRE_BigInt total_global_cpts, my_first_cpt;
   HYPRE_Int my_id, num_procs;
   HYPRE_Int num_sends;
   HYPRE_Int *int_buf_data = NULL;
   HYPRE_BigInt *big_int_buf_data = NULL;
   //HYPRE_Int col_start = hypre_ParCSRMatrixFirstRowIndex(A);
   //HYPRE_Int col_end = col_start + n_fine;
   HYPRE_Int i, j, i1, j1, k1, index, start;
   /* for each F-pt: local index of its chosen C-pt */
   HYPRE_Int *max_abs_cij;
   /* for each F-pt: where the chosen C-pt lives ('d'/'o'/'n') */
   char *max_abs_diag_offd;
   HYPRE_Real max_abs_aij, vv;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* last rank knows the global C-pt count; broadcast it to everyone */
   my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif
   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   /* CF marker for the off-diag columns */
   if (num_cols_A_offd)
   {
      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
   }
   /* function type indicator for the off-diag columns */
   if (num_functions > 1 && num_cols_A_offd)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
   }
   /* if CommPkg of A is not present, create it */
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   /* number of sends to do (number of procs) */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* send buffer, of size send_map_starts[num_sends]),
    * i.e., number of entries to send */
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST);
   /* copy CF markers of elements to send to buffer
    * RL: why copy them with two for loops? Why not just loop through all in one */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      /* start pos of elements sent to send_proc[i] */
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      /* loop through all elems to send_proc[i] */
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         /* CF marker of send_map_elemts[j] */
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
   }
   /* create a handle to start communication. 11: for integer */
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd);
   /* destroy the handle to finish communication */
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /* do a similar communication for dof_func */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   hypre_TFree(int_buf_data,HYPRE_MEMORY_HOST);
   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping,
    * and find the most strongly influencing C-pt for each F-pt
    *-----------------------------------------------------------------------*/
   /* nnz in diag and offd parts */
   cnt_diag = 0;
   cnt_offd = 0;
   max_abs_cij = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
   max_abs_diag_offd = hypre_CTAlloc(char, n_fine,HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
   /* markers initialized as zeros */
   marker_diag = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
   marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a C-point, interpolation is the identity. Also set up
       * mapping vector.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         //fine_to_coarse[i] = my_first_cpt + n_cpts;
         fine_to_coarse[i] = n_cpts;
         n_cpts++;
         continue;
      }
      /* mark all the strong connections: in S.
       * MARK is row-unique (i+1, never 0), so the markers need no reset
       * between rows. */
      HYPRE_Int MARK = i + 1;
      /* loop through row i of S, diag part */
      for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
      {
         marker_diag[S_diag_j[j]] = MARK;
      }
      /* loop through row i of S, offd part */
      if (num_procs > 1)
      {
         for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
         {
            /* translate S's offd column to A's offd numbering if a map is given */
            j1 = col_offd_S_to_A ? col_offd_S_to_A[S_offd_j[j]] : S_offd_j[j];
            marker_offd[j1] = MARK;
         }
      }
      fine_to_coarse[i] = -1;
      /*---------------------------------------------------------------------------
       * If i is an F-pt, interpolation is from the most strongly influencing C-pt
       * Find this C-pt and save it
       *--------------------------------------------------------------------------*/
      /* if we failed to find any strong C-pt, mark this point as an 'n' */
      char marker = 'n';
      /* max abs val */
      max_abs_aij = -1.0;
      /* loop through row i of A, diag part */
      for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
      {
         i1 = A_diag_j[j];
         vv = fabs(A_diag_data[j]);
#if 0
         /* !!! this is a hack just for code verification purpose !!!
            it basically says:
            1. if we see |a_ij| < 1e-14, force it to be 1e-14
            2. if we see |a_ij| == the max(|a_ij|) so far exactly,
               replace it if the j idx is smaller
            Reasons:
            1. numerical round-off for eps-level values
            2. entries in CSR rows may be listed in different orders
          */
         vv = vv < 1e-14 ? 1e-14 : vv;
         if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK &&
             vv == max_abs_aij && i1 < max_abs_cij[i])
         {
            /* mark it as a 'd' */
            marker = 'd';
            max_abs_cij[i] = i1;
            max_abs_aij = vv;
            continue;
         }
#endif
         /* it is a strong C-pt and has abs val larger than what have seen */
         if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK && vv > max_abs_aij)
         {
            /* mark it as a 'd' */
            marker = 'd';
            max_abs_cij[i] = i1;
            max_abs_aij = vv;
         }
      }
      /* offd part */
      if (num_procs > 1)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            i1 = A_offd_j[j];
            vv = fabs(A_offd_data[j]);
            if (CF_marker_offd[i1] >= 0 && marker_offd[i1] == MARK && vv > max_abs_aij)
            {
               /* mark it as an 'o' */
               marker = 'o';
               max_abs_cij[i] = i1;
               max_abs_aij = vv;
            }
         }
      }
      max_abs_diag_offd[i] = marker;
      if (marker == 'd')
      {
         cnt_diag ++;
      }
      else if (marker == 'o')
      {
         cnt_offd ++;
      }
   }
   /* every C-pt contributes one identity entry; every F-pt at most one entry */
   nnz_diag = cnt_diag + n_cpts;
   nnz_offd = cnt_offd;
   /*------------- allocate arrays */
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1,HYPRE_MEMORY_HOST);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag,HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, nnz_diag,HYPRE_MEMORY_HOST);
   /* not in ``if num_procs > 1'',
    * allocation needed even for empty CSR */
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1,HYPRE_MEMORY_HOST);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd,HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, nnz_offd,HYPRE_MEMORY_HOST);
   /* redundant */
   P_diag_i[0] = 0;
   P_offd_i[0] = 0;
   /* reset counters */
   cnt_diag = 0;
   cnt_offd = 0;
   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd,HYPRE_MEMORY_HOST);
   big_int_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST);
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         /* shift local coarse indices to global before sending */
         big_int_buf_data[index++] = my_first_cpt
            +(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
   }
   /* 21: big-int communication */
   comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, big_int_buf_data, fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /*-----------------------------------------------------------------------
    * Second Pass: Populate P
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] >= 0)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         //P_diag_j[cnt_diag] = fine_to_coarse[i] - my_first_cpt;
         P_diag_j[cnt_diag] = fine_to_coarse[i];
         P_diag_data[cnt_diag++] = 1.0;
      }
      else
      {
         /*---------------------------------------------------------------------------
          * If i is an F-pt, interpolation is from the most strongly influencing C-pt
          *--------------------------------------------------------------------------*/
         if (max_abs_diag_offd[i] == 'd')
         {
            /* on diag part of P */
            j = max_abs_cij[i];
            //P_diag_j[cnt_diag] = fine_to_coarse[j] - my_first_cpt;
            P_diag_j[cnt_diag] = fine_to_coarse[j];
            P_diag_data[cnt_diag++] = 1.0;
         }
         else if (max_abs_diag_offd[i] == 'o')
         {
            /* on offd part of P; column left in A-offd numbering for now */
            j = max_abs_cij[i];
            P_offd_j[cnt_offd] = j;
            P_offd_data[cnt_offd++] = 1.0;
         }
      }
      P_diag_i[i+1] = cnt_diag;
      P_offd_i[i+1] = cnt_offd;
   }
   hypre_assert(cnt_diag == nnz_diag);
   hypre_assert(cnt_offd == nnz_offd);
   /* num of cols in the offd part of P */
   num_cols_offd_P = 0;
   /* marker_offd: all -1 */
   for (i = 0; i < num_cols_A_offd; i++)
   {
      marker_offd[i] = -1;
   }
   /* count the distinct A-offd columns actually referenced by P */
   for (i = 0; i < nnz_offd; i++)
   {
      i1 = P_offd_j[i];
      if (marker_offd[i1] == -1)
      {
         num_cols_offd_P++;
         marker_offd[i1] = 1;
      }
   }
   /* col_map_offd_P: the col indices of the offd of P
    * we first keep them be the offd-idx of A */
   col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_P,HYPRE_MEMORY_HOST);
   tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_P,HYPRE_MEMORY_HOST);
   for (i = 0, i1 = 0; i < num_cols_A_offd; i++)
   {
      if (marker_offd[i] == 1)
      {
         tmp_map_offd[i1++] = i;
      }
   }
   hypre_assert(i1 == num_cols_offd_P);
   /* now, adjust P_offd_j to local idx w.r.t col_map_offd_P
    * by searching */
   for (i = 0; i < nnz_offd; i++)
   {
      i1 = P_offd_j[i];
      k1 = hypre_BinarySearch(tmp_map_offd, i1, num_cols_offd_P);
      /* search must succeed */
      hypre_assert(k1 >= 0 && k1 < num_cols_offd_P);
      P_offd_j[i] = k1;
   }
   /* change col_map_offd_P to global coarse ids */
   for (i = 0; i < num_cols_offd_P; i++)
   {
      col_map_offd_P[i] = fine_to_coarse_offd[tmp_map_offd[i]];
   }
   /* Now, we should have everything of Parcsr matrix P */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumCols(A), /* global num of rows */
                                total_global_cpts, /* global num of cols */
                                hypre_ParCSRMatrixColStarts(A), /* row_starts */
                                num_cpts_global, /* col_starts */
                                num_cols_offd_P, /* num cols offd */
                                nnz_diag,
                                nnz_offd);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   /* P does not own ColStarts, since A does */
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
   hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
   /* create CommPkg of P */
   hypre_MatvecCommPkgCreate(P);
   *P_ptr = P;
   /* free workspace */
   hypre_TFree(CF_marker_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(big_int_buf_data,HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse,HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(marker_diag,HYPRE_MEMORY_HOST);
   hypre_TFree(marker_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(max_abs_cij,HYPRE_MEMORY_HOST);
   hypre_TFree(max_abs_diag_offd,HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
|
MCModel.h | /*
* Copyright 2016 [See AUTHORS file for list of authors]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _MCMODEL_
#define _MCMODEL_
#include <sstream>
#include "Model.h"
DEFINE_int32(rlength, 100, "Length of vector in matrix completion.");
// Matrix-completion model: factorizes an n_users x n_movies rating matrix
// into rank-`rlength` latent-factor vectors, one per user and one per movie,
// stored contiguously in a single flat array (users first, then movies).
class MCModel : public Model {
 private:
    std::vector<double> model;  // (n_users + n_movies) * rlength latent factors
    int n_users;
    int n_movies;
    int rlength;                // latent dimension (from FLAGS_rlength)

    // Fill every latent factor with a uniform random value in [0, 1].
    // NOTE(review): rand() is not reseeded here; determinism depends on the
    // caller's srand() usage.
    void InitializePrivateModel() {
        for (int i = 0; i < n_users + n_movies; i++) {
            for (int j = 0; j < rlength; j++) {
                model[i * rlength + j] = ((double)rand() / (double)RAND_MAX);
            }
        }
    }

    // Parse the header line, allocate the factor array and randomize it.
    void Initialize(const std::string &input_line) {
        // Expected input_line format: N_USERS N_MOVIES.
        std::stringstream input(input_line);
        input >> n_users >> n_movies;
        rlength = FLAGS_rlength;
        // Allocate memory for all user and movie factors.
        model.resize((n_users + n_movies) * rlength);
        // Initialize private model.
        InitializePrivateModel();
    }

 public:
    MCModel(const std::string &input_line) {
        Initialize(input_line);
    }

    ~MCModel() {
    }

    // Shift every datapoint's movie coordinate by n_users so that movie
    // factors index the second chunk of the flat model array.
    void SetUp(const std::vector<Datapoint *> &datapoints) override {
        for (const auto &datapoint : datapoints) {
            ((MCDatapoint *)datapoint)->OffsetMovieCoord(n_users);
        }
    }

    // Mean squared error over all datapoints:
    //   loss = avg_i (u_x . v_y - label_i)^2
    double ComputeLoss(const std::vector<Datapoint *> &datapoints) override {
        // Guard: avoid division by zero (previously returned NaN on empty input).
        if (datapoints.empty()) return 0;
        double loss = 0;
        // Signed bound: OpenMP needs a signed induction variable, and the
        // explicit cast avoids the signed/unsigned comparison in the old code.
        const int n_points = static_cast<int>(datapoints.size());
#pragma omp parallel for num_threads(FLAGS_n_threads) reduction(+:loss)
        for (int i = 0; i < n_points; i++) {
            Datapoint *datapoint = datapoints[i];
            const std::vector<double> &labels = datapoint->GetWeights();
            const std::vector<int> &coordinates = datapoint->GetCoordinates();
            double label = labels[0];
            int x = coordinates[0];
            int y = coordinates[1];
            // Dot product of the user and movie latent vectors.
            double cross_product = 0;
            for (int j = 0; j < rlength; j++) {
                cross_product += model[x * rlength + j] * model[y * rlength + j];
            }
            double difference = cross_product - label;
            loss += difference * difference;
        }
        return loss / n_points;
    }

    std::vector<double> & ModelData() {
        return model;
    }

    // One "parameter" per user and per movie (each of size rlength).
    int NumParameters() override {
        return n_users + n_movies;
    }

    int CoordinateSize() override {
        return rlength;
    }

    // Precompute the shared gradient coefficient for a datapoint:
    // coeff = u_x . v_y - label.
    void PrecomputeCoefficients(Datapoint *datapoint, Gradient *g, std::vector<double> &local_model) override {
        if (g->coeffs.size() != 1) g->coeffs.resize(1);
        const std::vector<double> &labels = datapoint->GetWeights();
        const std::vector<int> &coordinates = datapoint->GetCoordinates();
        int user_coordinate = coordinates[0];
        int movie_coordinate = coordinates[1];
        double label = labels[0];
        double coeff = 0;
        for (int i = 0; i < rlength; i++) {
            coeff += local_model[user_coordinate * rlength + i] * local_model[movie_coordinate * rlength + i];
        }
        coeff -= label;
        g->coeffs[0] = coeff;
    }

    // No per-coordinate scalar term for matrix completion.
    void Lambda(int coordinate, double &out, std::vector<double> &local_model) override {
    }

    // No per-coordinate constant term for matrix completion.
    void Kappa(int coordinate, std::vector<double> &out, std::vector<double> &local_model) override {
    }

    // out = coeff * (latent vector of the partner coordinate), i.e. the
    // gradient of the squared error w.r.t. `coordinate`'s latent vector.
    void H_bar(int coordinate, std::vector<double> &out, Gradient *g, std::vector<double> &local_model) override {
        // BUG FIX: this index was previously declared `double`, so every
        // model access below went through a silent floating->integral
        // conversion. Coordinates are ints; index with an int.
        int other_coordinate = 0;
        if (g->datapoint->GetCoordinates()[0] == coordinate)
            other_coordinate = g->datapoint->GetCoordinates()[1];
        else
            other_coordinate = g->datapoint->GetCoordinates()[0];
        for (int i = 0; i < rlength; i++) {
            out[i] = g->coeffs[0] * local_model[other_coordinate * rlength + i];
        }
    }
};
#endif
|
mpi_dem_search.h | //
// Project Name: Kratos
// Last Modified by: $Author: clabra $
// Date: $Date: 2007-03-29 19:37:47 $
// Revision: $Revision: 1.2 $
//
//
#if !defined(KRATOS_MPI_DEM_SEARCH_H_INCLUDED )
#define KRATOS_MPI_DEM_SEARCH_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// include kratos definitions
#include "includes/define.h"
// Project includes
#include "spatial_containers/dem_search.h"
// Application includes
#include "custom_configures/mpi_discrete_particle_configure.h"
#include "custom_utilities/bins_dynamic_objects_mpi.h"
#define CUSTOMTIMER 1
/* Timer defines */
#include "utilities/timer.h"
#ifdef CUSTOMTIMER
#define KRATOS_TIMER_START(t) Timer::Start(t);
#define KRATOS_TIMER_STOP(t) Timer::Stop(t);
#else
#define KRATOS_TIMER_START(t)
#define KRATOS_TIMER_STOP(t)
#endif
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
class MPI_DEMSearch : public DEMSearch<MPI_DEMSearch> {
public:
///@name Type Definitions
///@{
/// Pointer definition of MPI_DEMSearch
KRATOS_CLASS_POINTER_DEFINITION(MPI_DEMSearch);
typedef MpiDiscreteParticleConfigure<3> ElementConfigureType;
typedef BinsObjectDynamicMpi<ElementConfigureType> BinsType;
typedef ElementConfigureType::IteratorType IteratorType;
///@}
///@name Life Cycle
///@{
/// Default constructor.
MPI_DEMSearch(Communicator& comm) : mCommunicator(comm){
}
/// Destructor.
~MPI_DEMSearch(){}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
void SearchElementsInRadiusExclusiveImplementation (
ElementsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType & Radius,
VectorResultElementsContainerType& rResults,
VectorDistanceType& rResultsDistance ) {
KRATOS_TRY
Clean_Modelpart();
// Get the data
auto & elements_ModelPart = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());
auto & elements_array = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
int NumberOfSearchElements = elements_array.size();
int NumberOfModelPElements = elements_ModelPart.size();
int MaxNumberOfElements = 50;
// Generate the bins
BinsType bins(elements_ModelPart.begin(), elements_ModelPart.end());
// Perform the search
std::vector<std::size_t> NumberOfResults(NumberOfModelPElements);
for (IteratorType particle_pointer_it = elements_array.begin(); particle_pointer_it != elements_array.end(); particle_pointer_it++) {
rResults[particle_pointer_it-elements_array.begin()].resize(MaxNumberOfElements);
rResultsDistance[particle_pointer_it-elements_array.begin()].resize(MaxNumberOfElements);
}
bins.SearchObjectsInRadius(
elements_array.begin(), elements_array.end(), NumberOfSearchElements, Radius,
rResults,rResultsDistance,NumberOfResults,
MaxNumberOfElements,&mCommunicator
);
for(int i = 0; i < NumberOfSearchElements; i++) {
rResults[i].resize(NumberOfResults[i]);
rResultsDistance[i].resize(NumberOfResults[i]);
}
// Update the modelpart interface and keep the coherence between domains
//
// Charlie: Pretty sure from this point onwards the code is redundant with the new configure functions
// to maintain the interface.
int ResultCounter = 0;
for (IteratorType particle_pointer_it = elements_array.begin(); particle_pointer_it != elements_array.end(); ++particle_pointer_it, ++ResultCounter) {
unsigned int neighbour_counter = 0;
for (ResultIteratorType neighbour_it = rResults[ResultCounter].begin(); neighbour_counter < NumberOfResults[ResultCounter]; ++neighbour_it, ++neighbour_counter) {
Add_To_Modelpart(neighbour_it);
}
}
//mCommunicator.SynchronizeNodalSolutionStepsData();
// Finally sort model for correct sync
Sort_Modelpart();
//mCommunicator.SynchronizeNodalSolutionStepsData();
KRATOS_CATCH(" ")
}
void SearchElementsInRadiusInclusiveImplementation (
ElementsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType & Radius,
VectorResultElementsContainerType& rResults,
VectorDistanceType& rResultsDistance ) {
KRATOS_ERROR << "Not implemented" << std::endl;
}
void SearchElementsInRadiusExclusiveImplementation (
ElementsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType & Radius,
VectorResultElementsContainerType& rResults ) {
KRATOS_ERROR << "Not implemented" << std::endl;
}
void SearchElementsInRadiusInclusiveImplementation (
ElementsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType & Radius,
VectorResultElementsContainerType& rResults ) {
KRATOS_ERROR << "Not implemented" << std::endl;
}
void SearchNodesInRadiusExclusiveImplementation (
NodesContainerType const& rStructureNodes,
NodesContainerType const& rNodes,
const RadiusArrayType & Radius,
VectorResultNodesContainerType& rResults,
VectorDistanceType& rResultsDistance ) {
KRATOS_ERROR << "Not implemented" << std::endl;
}
void SearchNodesInRadiusInclusiveImplementation (
NodesContainerType const& rStructureNodes,
NodesContainerType const& rNodes,
const RadiusArrayType & Radius,
VectorResultNodesContainerType& rResults,
VectorDistanceType& rResultsDistance ) {
KRATOS_ERROR << "Not implemented" << std::endl;
}
void SearchNodesInRadiusExclusiveImplementation (
NodesContainerType const& rStructureNodes,
NodesContainerType const& rNodes,
const RadiusArrayType & Radius,
VectorResultNodesContainerType& rResults ) {
KRATOS_ERROR << "Not implemented" << std::endl;
}
void SearchNodesInRadiusInclusiveImplementation (
NodesContainerType const& rStructureNodes,
NodesContainerType const& rNodes,
const RadiusArrayType & Radius,
VectorResultNodesContainerType& rResults ) {
KRATOS_ERROR << "Not implemented" << std::endl;
}
void SearchGeometricalInRadiusExclusiveImplementation (
ElementsContainerType const& rStructureElements,
ConditionsContainerType const& rElements,
const RadiusArrayType & Radius,
VectorResultConditionsContainerType& rResults,
VectorDistanceType& rResultsDistance ) {
KRATOS_ERROR << "Not implemented" << std::endl;
}
void SearchGeometricalInRadiusInclusiveImplementation (
ElementsContainerType const& rStructureElements,
ConditionsContainerType const& rElements,
const RadiusArrayType& Radius,
VectorResultConditionsContainerType& rResults,
VectorDistanceType& rResultsDistance ) {
KRATOS_ERROR << "Not implemented" << std::endl;
}
void SearchGeometricalInRadiusExclusiveImplementation (
ConditionsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType & Radius,
VectorResultElementsContainerType& rResults,
VectorDistanceType& rResultsDistance ) {
KRATOS_ERROR << "Not implemented" << std::endl;
}
void SearchGeometricalInRadiusInclusiveImplementation (
ConditionsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType& Radius,
VectorResultElementsContainerType& rResults,
VectorDistanceType& rResultsDistance ) {
KRATOS_ERROR << "Not implemented" << std::endl;
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
std::stringstream buffer;
buffer << "MPIDemSearch" ;
return buffer.str();
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const {rOStream << "MPIDemSearch";}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const {}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Communicator& mCommunicator;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
virtual void Clean_Modelpart() {
KRATOS_TRY
Communicator::NeighbourIndicesContainerType communicator_ranks = mCommunicator.NeighbourIndices();
unsigned int NumberOfRanks = mCommunicator.GetNumberOfColors();
std::vector<ModelPart::ElementsContainerType> ETempGhost(NumberOfRanks);
std::vector<ModelPart::ElementsContainerType> ETempLocal(NumberOfRanks);
std::vector<ModelPart::NodesContainerType> NTempGhost(NumberOfRanks);
std::vector<ModelPart::NodesContainerType> NTempLocal(NumberOfRanks);
//Clean the ghost(i) and local(i) meshes
for(unsigned int i = 0; i < NumberOfRanks; i++) {
ETempGhost[i].swap(mCommunicator.GhostMesh(i).Elements());
ETempLocal[i].swap(mCommunicator.LocalMesh(i).Elements());
NTempGhost[i].swap(mCommunicator.GhostMesh(i).Nodes());
NTempLocal[i].swap(mCommunicator.LocalMesh(i).Nodes());
}
//Celan the ghost mesh
ModelPart::ElementsContainerType ETempGhostGlobal;
ModelPart::NodesContainerType NTempGhostGlobal;
ETempGhostGlobal.swap(mCommunicator.GhostMesh().Elements());
NTempGhostGlobal.swap(mCommunicator.GhostMesh().Nodes());
KRATOS_CATCH(" ")
}
//TODO: Enable Local nodes again and remove them from the search function
void Add_To_Modelpart(ResultIteratorType neighbour_it) {
KRATOS_TRY
#pragma omp critical
{
Communicator::NeighbourIndicesContainerType communicator_ranks = mCommunicator.NeighbourIndices();
ElementsContainerType::ContainerType& pGhostElements = mCommunicator.GhostMesh().ElementsArray();
int NumberOfRanks = mCommunicator.GetNumberOfColors();
int destination = -1;
bool IsInGhostMesh = false;
for(int i = 0; i < NumberOfRanks; i++) {
if((*neighbour_it)->GetGeometry()(0)->GetSolutionStepValue(PARTITION_INDEX) == communicator_ranks[i]) {
destination = i;
}
}
if(destination > -1) {
for(IteratorType element_it = pGhostElements.begin(); !IsInGhostMesh && element_it != pGhostElements.end(); ++element_it) {
if((*element_it)->GetGeometry()(0)->Id() == (*neighbour_it)->GetGeometry()(0)->Id()) {
IsInGhostMesh = true;
}
}
if(!IsInGhostMesh) {
mCommunicator.GhostMesh().Elements().push_back((*neighbour_it));
mCommunicator.GhostMesh().Nodes().push_back((*neighbour_it)->GetGeometry()(0));
}
IsInGhostMesh = false;
ElementsContainerType::ContainerType& pMyGhostElements = mCommunicator.GhostMesh(destination).ElementsArray();
for(IteratorType element_it = pMyGhostElements.begin(); !IsInGhostMesh && element_it != pMyGhostElements.end(); ++element_it) {
if((*element_it)->GetGeometry()(0)->Id() == (*neighbour_it)->GetGeometry()(0)->Id()) {
IsInGhostMesh = true;
}
}
if(!IsInGhostMesh) {
mCommunicator.GhostMesh(destination).Elements().push_back((*neighbour_it));
mCommunicator.GhostMesh(destination).Nodes().push_back((*neighbour_it)->GetGeometry()(0));
}
}
}
KRATOS_CATCH(" ")
}
void Sort_Modelpart() {
KRATOS_TRY
for (unsigned int i = 0; i < mCommunicator.LocalMeshes().size(); i++){
mCommunicator.LocalMesh(i).Nodes().Unique();
mCommunicator.LocalMesh(i).Elements().Unique(); //MA is this necessary (if not, I have repeated elements)
}
mCommunicator.GhostMesh().Nodes().Unique();
mCommunicator.GhostMesh().Elements().Unique(); //MA is this necessary (if not, I have repeated elements)
for (unsigned int i = 0; i < mCommunicator.GhostMeshes().size(); i++) {
mCommunicator.GhostMesh(i).Nodes().Unique();
mCommunicator.GhostMesh(i).Elements().Unique(); //MA is this necessary (if not, I have repeated elements)
}
KRATOS_CATCH(" ")
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator (private).
/// Deliberately copies nothing and only returns *this — presumably declared
/// private to suppress the compiler-generated assignment; confirm before
/// relying on it actually copying state.
MPI_DEMSearch& operator=(MPI_DEMSearch const& rOther) {
return *this;
}
/// Copy constructor (private).
/// Binds mCommunicator to the same communicator as rOther, then delegates to
/// operator=, which (see above in this file) copies no further state.
MPI_DEMSearch(MPI_DEMSearch const& rOther) : mCommunicator(rOther.mCommunicator) {
*this = rOther;
}
///@}
}; // Class DEMSearch
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
/// Reads nothing and returns the stream unchanged — a placeholder kept so the
/// class matches the surrounding codebase's stream-operator convention.
inline std::istream& operator >> (std::istream& rIStream, MPI_DEMSearch& rThis) {
return rIStream;
}
/// output stream function
/// Prints the object's info line, a newline separator, then its data.
inline std::ostream& operator << (std::ostream& rOStream, const MPI_DEMSearch& rThis) {
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
///@} addtogroup block
} // namespace Kratos.
#endif // KRATOS_MPI_DEM_SEARCH_H_INCLUDED defined
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing X - Y in RESULT.
 *
 * RESULT is normalized so that 0 <= tv_usec < 1000000; a negative
 * difference is represented as a negative tv_sec plus a non-negative
 * tv_usec (the same representation the classic GNU snippet produces).
 *
 * Unlike the classic snippet, this version does NOT modify *y, so the
 * caller's operands remain valid after the call.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Work in whole microseconds: 64-bit arithmetic removes the manual
   * carry/borrow bookkeeping and cannot overflow for realistic times. */
  long long diff = (long long)(x->tv_sec - y->tv_sec) * 1000000LL
                 + (long long)(x->tv_usec - y->tv_usec);

  result->tv_sec = (time_t)(diff / 1000000LL);
  result->tv_usec = (suseconds_t)(diff % 1000000LL);

  /* C division truncates toward zero; renormalize so tv_usec >= 0. */
  if (result->tv_usec < 0)
  {
    result->tv_usec += 1000000;
    result->tv_sec -= 1;
  }

  /* Return 1 if result is negative. */
  return diff < 0;
}
/* Driver for the order-4-in-space (25-point) 3D stencil benchmark.
 *
 * Usage: ./3d25pt Nx Ny Nz [Nt]
 * The three interior sizes each get 8 ghost cells added (4 per side).
 * Runs the sweep TESTS times and reports per-test and best wall-clock time.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Safe defaults: the original read Nx/Ny/Nz/Nt uninitialized (undefined
   * behavior) whenever fewer than 3 (resp. 4) arguments were supplied. */
  int Nx = 40, Ny = 40, Nz = 40, Nt = 4;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;   /* +8: four ghost layers on each side */
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time planes A[0]/A[1] plus the coefficient field roc2.
   * (The original malloc'd roc2 twice and leaked the first block.) */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 4;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  //
  srand(42);
  /* Initialize the WHOLE grid, including ghost cells and both time planes.
   * The original started these loops at index 1 and never wrote A[1], so
   * the first sweep read uninitialized memory (undefined behavior). */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Order-4 central-difference coefficients (center + 4 rings). */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i ][j ][k ] +
                coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
                       A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
                       A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
                coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
                       A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
                       A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
                coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
                       A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
                       A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
                coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
                       A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
                       A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return;   /* end >= start here, so the sign flag is unused */
  (void) num_threads; /* kept for instrumented builds */

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);         /* the original leaked the top-level plane array */
  free(roc2);
  free(tile_size); /* the original leaked the tile-size list */
  return 0;
}
|
par_mod_lr_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildModExtInterp
* Comment:
*--------------------------------------------------------------------------*/
/* Host (CPU/OpenMP) construction of the modularized "extended"
 * interpolation operator P for BoomerAMG.
 *
 * A              - fine-level matrix.
 * CF_marker      - per-row marker: > 0 means C-point, otherwise F-point
 *                  (only the sign is inspected here).
 * S              - strength matrix; only forwarded to
 *                  hypre_ParCSRMatrixGenerateFFFC (semantics defined there).
 * num_cpts_global- global C-point partition.
 * debug_flag     - unused here (timing code is commented out).
 * trunc_factor,
 * max_elmts      - truncation controls forwarded to
 *                  hypre_BoomerAMGInterpTruncation.
 * col_offd_S_to_A- unused in this host implementation.
 * P_ptr          - output: the assembled interpolation matrix.
 *
 * Outline: split A into strong F-F and F-C parts, rescale them with the
 * row-sum diagonals D_q and D_w, form W = As_FF * As_FC, then interleave
 * W rows (F-points) with identity rows (C-points) to build P.
 */
HYPRE_Int
hypre_BoomerAMGBuildModExtInterpHost(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt total_global_cpts;
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/* Intermediate matrices */
hypre_ParCSRMatrix *As_FF, *As_FC, *W;
HYPRE_Real *D_q, *D_w;
hypre_CSRMatrix *As_FF_diag;
hypre_CSRMatrix *As_FF_offd;
hypre_CSRMatrix *As_FC_diag;
hypre_CSRMatrix *As_FC_offd;
hypre_CSRMatrix *W_diag;
hypre_CSRMatrix *W_offd;
HYPRE_Int *As_FF_diag_i;
HYPRE_Int *As_FF_offd_i;
HYPRE_Int *As_FC_diag_i;
HYPRE_Int *As_FC_offd_i;
HYPRE_Int *W_diag_i;
HYPRE_Int *W_offd_i;
HYPRE_Int *W_diag_j;
HYPRE_Int *W_offd_j;
HYPRE_Real *As_FF_diag_data;
HYPRE_Real *As_FF_offd_data;
HYPRE_Real *As_FC_diag_data;
HYPRE_Real *As_FC_offd_data;
HYPRE_Real *W_diag_data;
HYPRE_Real *W_offd_data;
HYPRE_BigInt *col_map_offd_P = NULL;
HYPRE_BigInt *new_col_map_offd = NULL;
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int new_ncols_P_offd;
HYPRE_Int num_cols_P_offd;
HYPRE_Int *P_marker = NULL;
/* Loop variables */
HYPRE_Int index;
HYPRE_Int i, j;
HYPRE_Int *cpt_array;
HYPRE_Int *start_array;
HYPRE_Int *startf_array;
HYPRE_Int start, stop, startf, stopf;
HYPRE_Int cnt_diag, cnt_offd, row, c_pt;
/* Definitions */
//HYPRE_Real wall_time;
HYPRE_Int n_Cpts, n_Fpts;
HYPRE_Int num_threads = hypre_NumThreads();
//if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
/* Determine the global number of C-points and this rank's local count,
 * under either global-partition convention. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
n_Cpts = num_cpts_global[1]-num_cpts_global[0];
#else
total_global_cpts = num_cpts_global[num_procs];
n_Cpts = num_cpts_global[my_id+1]-num_cpts_global[my_id];
#endif
/* Split A (filtered by S) into its strong F-to-C and F-to-F parts. */
hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF);
As_FC_diag = hypre_ParCSRMatrixDiag(As_FC);
As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag);
As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag);
As_FC_offd = hypre_ParCSRMatrixOffd(As_FC);
As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd);
As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd);
As_FF_diag = hypre_ParCSRMatrixDiag(As_FF);
As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag);
As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag);
As_FF_offd = hypre_ParCSRMatrixOffd(As_FF);
As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd);
As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd);
n_Fpts = hypre_CSRMatrixNumRows(As_FF_diag);
D_q = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
D_w = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
start_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
startf_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
/* Phase 1 (parallel): partition rows across threads, count C-points per
 * thread, build the diagonals D_q and D_w, and rescale As_FF / As_FC in
 * place. Barriers separate the prefix-sum and consumer steps. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,row)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
HYPRE_Real beta, gamma;
/* Contiguous slice [start, stop) of fine rows for this thread. */
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{
stop = n_fine;
}
else
{
stop = (n_fine/num_threads)*(my_thread_num+1);
}
start_array[my_thread_num+1] = stop;
/* Count this thread's C-points. */
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
cpt_array[my_thread_num]++;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Thread 0 turns the per-thread counts into an inclusive prefix sum. */
if (my_thread_num == 0)
{
for (i=1; i < num_threads; i++)
{
cpt_array[i] += cpt_array[i-1];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* F-point index range [startf, stopf) owned by this thread:
 * fine index minus the number of preceding C-points. */
if (my_thread_num > 0)
startf = start - cpt_array[my_thread_num-1];
else
startf = 0;
if (my_thread_num < num_threads-1)
stopf = stop - cpt_array[my_thread_num];
else
stopf = n_Fpts;
startf_array[my_thread_num+1] = stopf;
/* Create D_q = D_beta */
/* D_q[i] = row sum of As_FC (diag + offd) for F-row i. */
for (i=startf; i < stopf; i++)
{
for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
{
D_q[i] += As_FC_diag_data[j];
}
for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
{
D_q[i] += As_FC_offd_data[j];
}
}
/* Create D_w = D_alpha + D_gamma */
/* D_w[row] = (full A row sum) - (strong FF off-diagonal sum) - D_q[row];
 * note the +1 skips the stored FF diagonal entry. */
row = startf;
for (i=start; i < stop; i++)
{
if (CF_marker[i] < 0)
{
for (j=A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
D_w[row] += A_diag_data[j];
}
for (j=A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
D_w[row] += A_offd_data[j];
}
for (j=As_FF_diag_i[row]+1; j < As_FF_diag_i[row+1]; j++)
{
D_w[row] -= As_FF_diag_data[j];
}
for (j=As_FF_offd_i[row]; j < As_FF_offd_i[row+1]; j++)
{
D_w[row] -= As_FF_offd_data[j];
}
D_w[row] -= D_q[row];
row++;
}
}
/* Rescale each F-row: FF entries by beta = 1/D_w, FC entries by
 * gamma = -1/D_q; the stored FF diagonal becomes beta*D_q.
 * Zero divisors fall back to +/-1. */
for (i=startf; i<stopf; i++)
{
j = As_FF_diag_i[i];
if (D_w[i]) beta = 1.0/D_w[i];
else beta = 1.0;
As_FF_diag_data[j] = beta*D_q[i];
if (D_q[i]) gamma = -1.0/D_q[i];
else gamma = 1.0;
for (j=As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
As_FF_diag_data[j] *= beta;
for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
As_FF_offd_data[j] *= beta;
for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
As_FC_diag_data[j] *= gamma;
for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
As_FC_offd_data[j] *= gamma;
}
} /* end parallel region */
/* W holds the interpolation weights for F-rows: W = As_FF * As_FC. */
W = hypre_ParMatmul(As_FF, As_FC);
W_diag = hypre_ParCSRMatrixDiag(W);
W_offd = hypre_ParCSRMatrixOffd(W);
W_diag_i = hypre_CSRMatrixI(W_diag);
W_diag_j = hypre_CSRMatrixJ(W_diag);
W_diag_data = hypre_CSRMatrixData(W_diag);
W_offd_i = hypre_CSRMatrixI(W_offd);
W_offd_j = hypre_CSRMatrixJ(W_offd);
W_offd_data = hypre_CSRMatrixData(W_offd);
num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);
/*-----------------------------------------------------------------------
 * Initialize data for P: one identity entry per C-row plus all of W.
 *-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_Fpts];
P_offd_size = hypre_CSRMatrixI(W_offd)[n_Fpts];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
/* Phase 2 (parallel): fill P row by row. Each thread knows its exact
 * write offsets from the arrays computed in phase 1, so no locking is
 * needed. C-rows get a single 1.0 (identity); F-rows copy their W row. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,c_pt,row,cnt_diag,cnt_offd)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
startf = startf_array[my_thread_num];
stopf = startf_array[my_thread_num+1];
start = start_array[my_thread_num];
stop = start_array[my_thread_num+1];
if (my_thread_num > 0)
c_pt = cpt_array[my_thread_num-1];
else
c_pt = 0;
cnt_diag = W_diag_i[startf]+c_pt;
cnt_offd = W_offd_i[startf];
row = startf;
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
P_diag_j[cnt_diag] = c_pt++;
P_diag_data[cnt_diag++] = 1.0;
}
else
{
for (j=W_diag_i[row]; j < W_diag_i[row+1]; j++)
{
P_diag_j[cnt_diag] = W_diag_j[j];
P_diag_data[cnt_diag++] = W_diag_data[j];
}
for (j=W_offd_i[row]; j < W_offd_i[row+1]; j++)
{
P_offd_j[cnt_offd] = W_offd_j[j];
P_offd_data[cnt_offd++] = W_offd_data[j];
}
row++;
}
P_diag_i[i+1] = cnt_diag;
P_offd_i[i+1] = cnt_offd;
}
} /* end parallel region */
/*-----------------------------------------------------------------------
 * Create matrix (P adopts the arrays filled above and steals W's
 * off-diagonal column map).
 *-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
num_cols_P_offd,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W);
hypre_ParCSRMatrixColMapOffd(W) = NULL;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max;
 * afterwards remap off-diagonal columns to the surviving set. */
if (trunc_factor != 0.0 || max_elmts > 0)
{
HYPRE_Int *map;
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
if (num_cols_P_offd)
{
/* Mark off-diagonal columns still referenced after truncation. */
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < P_offd_size; i++)
{
P_marker[P_offd_j[i]] = 1;
}
new_ncols_P_offd = 0;
for (i=0; i < num_cols_P_offd; i++)
{
if (P_marker[i]) new_ncols_P_offd++;
}
new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST);
map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
if (P_marker[i])
{
new_col_map_offd[index] = col_map_offd_P[i];
map[index++] = i;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
/* Renumber P_offd_j into the compressed column space. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{
P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i],
new_ncols_P_offd);
}
hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
hypre_TFree(map, HYPRE_MEMORY_HOST);
}
}
hypre_MatvecCommPkgCreate(P);
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(D_q, memory_location_P);
hypre_TFree(D_w, memory_location_P);
hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
hypre_TFree(start_array, HYPRE_MEMORY_HOST);
hypre_TFree(startf_array, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDestroy(As_FF);
hypre_ParCSRMatrixDestroy(As_FC);
hypre_ParCSRMatrixDestroy(W);
return hypre_error_flag;
}
/*-----------------------------------------------------------------------*
* Modularized Extended Interpolation
*-----------------------------------------------------------------------*/
/* Dispatch wrapper: runs the modularized extended interpolation on the
 * host, or on the device when A lives in device memory and CUDA support
 * is compiled in. Arguments are forwarded unchanged. */
HYPRE_Int
hypre_BoomerAMGBuildModExtInterp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
HYPRE_Int ierr = 0;
HYPRE_ExecutionPolicy exec_policy;
#if defined(HYPRE_USING_CUDA)
hypre_NvtxPushRange("ModExtInterp");
#endif
/* Pick the execution side from where A's data resides. */
exec_policy = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
if (exec_policy == HYPRE_EXEC_HOST)
{
ierr = hypre_BoomerAMGBuildModExtInterpHost(A, CF_marker, S, num_cpts_global,
debug_flag, trunc_factor, max_elmts, col_offd_S_to_A, P_ptr);
}
#if defined(HYPRE_USING_CUDA)
else
{
ierr = hypre_BoomerAMGBuildExtInterpDevice(A, CF_marker, S, num_cpts_global, 1, NULL,
debug_flag, trunc_factor, max_elmts, col_offd_S_to_A, P_ptr);
}
hypre_NvtxPopRange();
#endif
return ierr;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildModExtPIInterp
* Comment:
*--------------------------------------------------------------------------*/
/* Host (CPU/OpenMP) construction of the modularized "extended+i"
 * interpolation operator P for BoomerAMG.
 *
 * Differs from the plain extended variant above by also accounting for
 * strong F-F couplings through the D_theta term, which requires the
 * transpose-direction FF entries: off-rank FF rows are fetched with
 * hypre_ParCSRMatrixExtractBExt, and an unscaled copy of the local FF
 * values (tmp_FF_diag_data) is kept because the FF matrix is rescaled
 * in place while other rows' original entries are still needed.
 *
 * Parameters match hypre_BoomerAMGBuildModExtInterpHost; debug_flag and
 * col_offd_S_to_A are unused here as well.
 */
HYPRE_Int
hypre_BoomerAMGBuildModExtPIInterpHost(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle = NULL;
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt total_global_cpts;
/* Off-rank rows of As_FF (only populated when num_procs > 1). */
hypre_CSRMatrix *As_FF_ext = NULL;
HYPRE_Real *As_FF_ext_data = NULL;
HYPRE_Int *As_FF_ext_i = NULL;
HYPRE_BigInt *As_FF_ext_j = NULL;
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/* Intermediate matrices */
hypre_ParCSRMatrix *As_FF, *As_FC, *W;
HYPRE_Real *D_q, *D_w, *D_theta, *D_q_offd = NULL;
hypre_CSRMatrix *As_FF_diag;
hypre_CSRMatrix *As_FF_offd;
hypre_CSRMatrix *As_FC_diag;
hypre_CSRMatrix *As_FC_offd;
hypre_CSRMatrix *W_diag;
hypre_CSRMatrix *W_offd;
HYPRE_Int *As_FF_diag_i;
HYPRE_Int *As_FF_diag_j;
HYPRE_Int *As_FF_offd_i;
HYPRE_Int *As_FF_offd_j = NULL;
HYPRE_Int *As_FC_diag_i;
HYPRE_Int *As_FC_offd_i;
HYPRE_Int *W_diag_i;
HYPRE_Int *W_offd_i;
HYPRE_Int *W_diag_j;
HYPRE_Int *W_offd_j = NULL;
HYPRE_Real *As_FF_diag_data;
HYPRE_Real *As_FF_offd_data = NULL;
HYPRE_Real *As_FC_diag_data;
HYPRE_Real *As_FC_offd_data = NULL;
HYPRE_Real *W_diag_data;
HYPRE_Real *W_offd_data = NULL;
HYPRE_Real *buf_data = NULL;
HYPRE_Real *tmp_FF_diag_data = NULL;
HYPRE_BigInt *col_map_offd_P = NULL;
HYPRE_BigInt *new_col_map_offd = NULL;
HYPRE_BigInt first_index;
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int new_ncols_P_offd;
HYPRE_Int num_cols_P_offd;
HYPRE_Int *P_marker = NULL;
/* Loop variables */
HYPRE_Int index, startc, num_sends;
HYPRE_Int i, j, jj, k, kk;
HYPRE_Int *cpt_array;
HYPRE_Int *start_array;
HYPRE_Int *startf_array;
HYPRE_Int start, stop, startf, stopf;
HYPRE_Int cnt_diag, cnt_offd, row, c_pt;
HYPRE_Int num_cols_A_FF_offd;
HYPRE_Real value, value1, theta;
/* Definitions */
//HYPRE_Real wall_time;
HYPRE_Int n_Cpts, n_Fpts;
HYPRE_Int num_threads = hypre_NumThreads();
//if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
/* Global / local C-point counts under either partition convention. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
n_Cpts = num_cpts_global[1]-num_cpts_global[0];
#else
total_global_cpts = num_cpts_global[num_procs];
n_Cpts = num_cpts_global[my_id+1]-num_cpts_global[my_id];
#endif
/* Split A (filtered by S) into strong F-to-C and F-to-F parts. */
hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF);
/* Fetch the off-rank FF rows needed for the D_theta reciprocal lookups. */
if (num_procs > 1)
{
As_FF_ext = hypre_ParCSRMatrixExtractBExt(As_FF,As_FF,1);
As_FF_ext_i = hypre_CSRMatrixI(As_FF_ext);
As_FF_ext_j = hypre_CSRMatrixBigJ(As_FF_ext);
As_FF_ext_data = hypre_CSRMatrixData(As_FF_ext);
}
As_FC_diag = hypre_ParCSRMatrixDiag(As_FC);
As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag);
As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag);
As_FC_offd = hypre_ParCSRMatrixOffd(As_FC);
As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd);
As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd);
As_FF_diag = hypre_ParCSRMatrixDiag(As_FF);
As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag);
As_FF_diag_j = hypre_CSRMatrixJ(As_FF_diag);
As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag);
As_FF_offd = hypre_ParCSRMatrixOffd(As_FF);
As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd);
As_FF_offd_j = hypre_CSRMatrixJ(As_FF_offd);
As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd);
n_Fpts = hypre_CSRMatrixNumRows(As_FF_diag);
num_cols_A_FF_offd = hypre_CSRMatrixNumCols(As_FF_offd);
/* First global row index owned by this rank (to map big ext column
 * indices back to local F-point indices). */
#ifdef HYPRE_NO_GLOBAL_PARTITION
first_index = hypre_ParCSRMatrixRowStarts(As_FF)[0];
#else
first_index = hypre_ParCSRMatrixRowStarts(As_FF)[my_id];
#endif
/* Unscaled snapshot of the local FF values; needed later because
 * As_FF_diag_data is divided in place row by row. */
tmp_FF_diag_data = hypre_CTAlloc(HYPRE_Real, As_FF_diag_i[n_Fpts], memory_location_P);
D_q = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
D_theta = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
D_w = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
start_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
startf_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
/* Phase 1 (parallel): thread row partition, per-thread C-point counts,
 * D_q row sums, FF value snapshot, D_q exchange for off-rank columns
 * (thread 0 only), D_w, D_theta, and the final in-place scaling of
 * As_FF / As_FC. Barriers order the producer/consumer steps. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,jj,k,kk,start,stop,startf,stopf,row,theta,value,value1)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
/* Contiguous slice [start, stop) of fine rows for this thread. */
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{
stop = n_fine;
}
else
{
stop = (n_fine/num_threads)*(my_thread_num+1);
}
start_array[my_thread_num+1] = stop;
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
cpt_array[my_thread_num]++;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Thread 0 turns per-thread C-point counts into a prefix sum. */
if (my_thread_num == 0)
{
for (i=1; i < num_threads; i++)
{
cpt_array[i] += cpt_array[i-1];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* F-point index range [startf, stopf) owned by this thread. */
if (my_thread_num > 0)
startf = start - cpt_array[my_thread_num-1];
else
startf = 0;
if (my_thread_num < num_threads-1)
stopf = stop - cpt_array[my_thread_num];
else
stopf = n_Fpts;
startf_array[my_thread_num+1] = stopf;
/* D_q[i] = row sum of As_FC (diag + offd) for F-row i. */
for (i=startf; i < stopf; i++)
{
for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
{
D_q[i] += As_FC_diag_data[j];
}
for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
{
D_q[i] += As_FC_offd_data[j];
}
}
/* Snapshot this thread's FF values before any rescaling. */
for (j = As_FF_diag_i[startf]; j < As_FF_diag_i[stopf]; j++)
{
tmp_FF_diag_data[j] = As_FF_diag_data[j];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Thread 0 exchanges D_q so each rank also has the values for its
 * off-diagonal (ghost) F columns in D_q_offd. */
if (my_thread_num == 0)
{
if (num_cols_A_FF_offd)
{
D_q_offd = hypre_CTAlloc(HYPRE_Real,  num_cols_A_FF_offd, memory_location_P);
}
index = 0;
comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(As_FF);
comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), memory_location_P);
for (i = 0; i < num_sends; i++)
{
startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
buf_data[index++] = D_q[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_q_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* D_w[row] = (full A row sum) - (strong FF off-diag sum) - D_q[row];
 * the +1 skips the stored FF diagonal entry. */
row = startf;
for (i=start; i < stop; i++)
{
if (CF_marker[i] < 0)
{
for (j=A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
D_w[row] += A_diag_data[j];
}
for (j=A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
D_w[row] += A_offd_data[j];
}
for (j=As_FF_diag_i[row]+1; j < As_FF_diag_i[row+1]; j++)
{
D_w[row] -= As_FF_diag_data[j];
}
for (j=As_FF_offd_i[row]; j < As_FF_offd_i[row+1]; j++)
{
D_w[row] -= As_FF_offd_data[j];
}
D_w[row] -= D_q[row];
row++;
}
}
/* For each strong F-neighbor jj of F-row i, look up the reciprocal
 * entry a_{jj,i} (locally in the snapshot, remotely via As_FF_ext),
 * accumulate the "+i" correction into D_theta[i], and divide the
 * entry by D_q[jj] (+ reciprocal value when found). */
for (i=startf; i<stopf; i++)
{
for (j = As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
{
jj = As_FF_diag_j[j];
value = D_q[jj];
for (k = As_FF_diag_i[jj]+1; k < As_FF_diag_i[jj+1]; k++)
{
kk = As_FF_diag_j[k];
if (kk == i)
{
value1 = tmp_FF_diag_data[k];
value += value1;
D_theta[i] += As_FF_diag_data[j]*value1/value;
break;
}
}
As_FF_diag_data[j] /= value;
}
for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
{
jj = As_FF_offd_j[j];
value = D_q_offd[jj];
for (k = As_FF_ext_i[jj]; k < As_FF_ext_i[jj+1]; k++)
{
kk = (HYPRE_Int)(As_FF_ext_j[k] - first_index);
if (kk == i)
{
value1 = As_FF_ext_data[k];
value += value1;
D_theta[i] += As_FF_offd_data[j]*value1/value;
break;
}
}
As_FF_offd_data[j] /= value;
}
As_FF_diag_data[As_FF_diag_i[i]] = 1.0;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Final row scaling by -1/(D_theta + D_w); rows with a zero divisor
 * are left unscaled. */
for (i=startf; i<stopf; i++)
{
theta = (D_theta[i]+D_w[i]);
if (theta)
{
theta = -1.0/theta;
for (j=As_FF_diag_i[i]; j < As_FF_diag_i[i+1]; j++)
As_FF_diag_data[j] *= theta;
for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
As_FF_offd_data[j] *= theta;
}
}
} /* end parallel region */
/* W holds the interpolation weights for F-rows: W = As_FF * As_FC. */
W = hypre_ParMatmul(As_FF, As_FC);
W_diag = hypre_ParCSRMatrixDiag(W);
W_offd = hypre_ParCSRMatrixOffd(W);
W_diag_i = hypre_CSRMatrixI(W_diag);
W_diag_j = hypre_CSRMatrixJ(W_diag);
W_diag_data = hypre_CSRMatrixData(W_diag);
W_offd_i = hypre_CSRMatrixI(W_offd);
W_offd_j = hypre_CSRMatrixJ(W_offd);
W_offd_data = hypre_CSRMatrixData(W_offd);
num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);
/*-----------------------------------------------------------------------
 * Initialize data for P: one identity entry per C-row plus all of W.
 *-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_Fpts];
P_offd_size = hypre_CSRMatrixI(W_offd)[n_Fpts];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
/* Phase 2 (parallel): fill P row by row; per-thread write offsets come
 * from the phase-1 arrays, so no locking is needed. C-rows get a
 * single 1.0 (identity); F-rows copy their W row. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,c_pt,row,cnt_diag,cnt_offd)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
startf = startf_array[my_thread_num];
stopf = startf_array[my_thread_num+1];
start = start_array[my_thread_num];
stop = start_array[my_thread_num+1];
if (my_thread_num > 0)
c_pt = cpt_array[my_thread_num-1];
else
c_pt = 0;
cnt_diag = W_diag_i[startf]+c_pt;
cnt_offd = W_offd_i[startf];
row = startf;
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
P_diag_j[cnt_diag] = c_pt++;
P_diag_data[cnt_diag++] = 1.0;
}
else
{
for (j=W_diag_i[row]; j < W_diag_i[row+1]; j++)
{
P_diag_j[cnt_diag] = W_diag_j[j];
P_diag_data[cnt_diag++] = W_diag_data[j];
}
for (j=W_offd_i[row]; j < W_offd_i[row+1]; j++)
{
P_offd_j[cnt_offd] = W_offd_j[j];
P_offd_data[cnt_offd++] = W_offd_data[j];
}
row++;
}
P_diag_i[i+1] = cnt_diag;
P_offd_i[i+1] = cnt_offd;
}
} /* end parallel region */
/*-----------------------------------------------------------------------
 * Create matrix (P adopts the arrays filled above and steals W's
 * off-diagonal column map).
 *-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
num_cols_P_offd,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W);
hypre_ParCSRMatrixColMapOffd(W) = NULL;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max;
 * afterwards remap off-diagonal columns to the surviving set. */
if (trunc_factor != 0.0 || max_elmts > 0)
{
HYPRE_Int *map;
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
if (num_cols_P_offd)
{
/* Mark off-diagonal columns still referenced after truncation. */
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < P_offd_size; i++)
P_marker[P_offd_j[i]] = 1;
new_ncols_P_offd = 0;
for (i=0; i < num_cols_P_offd; i++)
if (P_marker[i]) new_ncols_P_offd++;
new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST);
map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
if (P_marker[i])
{
new_col_map_offd[index] = col_map_offd_P[i];
map[index++] = i;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
/* Renumber P_offd_j into the compressed column space. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{
P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i],
new_ncols_P_offd);
}
hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
hypre_TFree(map, HYPRE_MEMORY_HOST);
}
}
hypre_MatvecCommPkgCreate(P);
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(D_q, memory_location_P);
hypre_TFree(D_q_offd, memory_location_P);
hypre_TFree(D_w, memory_location_P);
hypre_TFree(D_theta, memory_location_P);
hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
hypre_TFree(start_array, HYPRE_MEMORY_HOST);
hypre_TFree(startf_array, HYPRE_MEMORY_HOST);
hypre_TFree(buf_data, memory_location_P);
hypre_TFree(tmp_FF_diag_data, memory_location_P);
hypre_ParCSRMatrixDestroy(As_FF);
hypre_ParCSRMatrixDestroy(As_FC);
hypre_ParCSRMatrixDestroy(W);
hypre_CSRMatrixDestroy(As_FF_ext);
return hypre_error_flag;
}
/*-----------------------------------------------------------------------*
* Modularized Extended+i Interpolation
*-----------------------------------------------------------------------*/
/* Dispatch wrapper: runs the modularized extended+i interpolation on the
 * host, or on the device when A lives in device memory and CUDA support
 * is compiled in. Note the device variant does not take col_offd_S_to_A. */
HYPRE_Int
hypre_BoomerAMGBuildModExtPIInterp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
HYPRE_Int ierr = 0;
HYPRE_ExecutionPolicy exec_policy;
#if defined(HYPRE_USING_CUDA)
hypre_NvtxPushRange("ExtPIInterp");
#endif
/* Pick the execution side from where A's data resides. */
exec_policy = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
if (exec_policy == HYPRE_EXEC_HOST)
{
ierr = hypre_BoomerAMGBuildModExtPIInterpHost(A, CF_marker, S, num_cpts_global,
debug_flag, trunc_factor, max_elmts, col_offd_S_to_A, P_ptr);
}
#if defined(HYPRE_USING_CUDA)
else
{
ierr = hypre_BoomerAMGBuildExtPIInterpDevice(A, CF_marker, S, num_cpts_global, 1, NULL,
debug_flag, trunc_factor, max_elmts, P_ptr);
}
hypre_NvtxPopRange();
#endif
return ierr;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildModNewExtPIInterp
* Comment:
*--------------------------------------------------------------------------*/
/* Builds the "modularized new" Extended+i interpolation operator P on the host.
 *
 * Outline (as visible below):
 *   1. Split A into strength-filtered F-F and F-C parts (As_FF, As_FC) via
 *      hypre_ParCSRMatrixGenerateFFFC.
 *   2. In a threaded region, compute per-F-row weights (D_lambda, D_beta,
 *      D_tmp, D_w, D_tau) and rescale As_FF / As_FC in place.
 *   3. Form W = As_FF * As_FC, then assemble P = [W; I] (identity on C-points).
 *   4. Optionally truncate P and compress its off-diagonal column map.
 *
 * NOTE(review): the loops over As_FF diag rows start at As_FF_diag_i[i]+1 and
 * the first entry per row is overwritten with value*theta — this assumes
 * GenerateFFFC stores the row's diagonal entry first; confirm against its
 * implementation. */
HYPRE_Int
hypre_BoomerAMGBuildModNewExtPIInterp(hypre_ParCSRMatrix *A,
                                      HYPRE_Int *CF_marker,
                                      hypre_ParCSRMatrix *S,
                                      HYPRE_BigInt *num_cpts_global,
                                      HYPRE_Int debug_flag,
                                      HYPRE_Real trunc_factor,
                                      HYPRE_Int max_elmts,
                                      HYPRE_Int *col_offd_S_to_A,
                                      hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle = NULL;
   HYPRE_Int my_id, num_procs;
   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt total_global_cpts;
   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data = NULL;
   HYPRE_Int *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real *P_offd_data = NULL;
   HYPRE_Int *P_offd_i, *P_offd_j = NULL;
   /* Intermediate matrices */
   hypre_ParCSRMatrix *As_FF, *As_FC, *W;
   /* Per-F-row scalar workspaces used to build the interpolation weights. */
   HYPRE_Real *D_beta, *D_w, *D_lambda, *D_tmp, *D_tau, *D_tmp_offd = NULL;
   hypre_CSRMatrix *As_FF_diag;
   hypre_CSRMatrix *As_FF_offd;
   hypre_CSRMatrix *As_FC_diag;
   hypre_CSRMatrix *As_FC_offd;
   hypre_CSRMatrix *W_diag;
   hypre_CSRMatrix *W_offd;
   HYPRE_Int *As_FF_diag_i;
   HYPRE_Int *As_FF_diag_j;
   HYPRE_Int *As_FF_offd_i;
   HYPRE_Int *As_FF_offd_j;
   HYPRE_Int *As_FC_diag_i;
   HYPRE_Int *As_FC_offd_i;
   HYPRE_Int *W_diag_i;
   HYPRE_Int *W_offd_i;
   HYPRE_Int *W_diag_j;
   HYPRE_Int *W_offd_j = NULL;
   HYPRE_Real *As_FF_diag_data;
   HYPRE_Real *As_FF_offd_data = NULL;
   HYPRE_Real *As_FC_diag_data;
   HYPRE_Real *As_FC_offd_data = NULL;
   HYPRE_Real *W_diag_data;
   HYPRE_Real *W_offd_data = NULL;
   HYPRE_Real *buf_data = NULL;
   HYPRE_BigInt *col_map_offd_P = NULL;
   HYPRE_BigInt *new_col_map_offd = NULL;
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   HYPRE_Int new_ncols_P_offd;
   HYPRE_Int num_cols_P_offd;
   HYPRE_Int *P_marker = NULL;
   /* Loop variables */
   HYPRE_Int index, startc, num_sends;
   HYPRE_Int i, j;
   /* Per-thread C-point counts and fine/F-point row ranges (filled in the
    * first parallel region, reused in the second). */
   HYPRE_Int *cpt_array;
   HYPRE_Int *start_array;
   HYPRE_Int *startf_array;
   HYPRE_Int start, stop, startf, stopf;
   HYPRE_Int cnt_diag, cnt_offd, row, c_pt;
   HYPRE_Int num_cols_A_FF_offd;
   HYPRE_Real value, theta;
   /* Definitions */
   //HYPRE_Real wall_time;
   HYPRE_Int n_Cpts, n_Fpts;
   HYPRE_Int num_threads = hypre_NumThreads();
   //if (debug_flag==4) wall_time = time_getWallclockSeconds();
   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Last rank owns the global C-point count; broadcast it to everyone. */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
   n_Cpts = num_cpts_global[1]-num_cpts_global[0];
#else
   total_global_cpts = num_cpts_global[num_procs];
   n_Cpts = num_cpts_global[my_id+1]-num_cpts_global[my_id];
#endif
   /* Extract the F-F and F-C sub-matrices of A (filtered by strength S). */
   hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF);
   As_FC_diag = hypre_ParCSRMatrixDiag(As_FC);
   As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag);
   As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag);
   As_FC_offd = hypre_ParCSRMatrixOffd(As_FC);
   As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd);
   As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd);
   As_FF_diag = hypre_ParCSRMatrixDiag(As_FF);
   As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag);
   As_FF_diag_j = hypre_CSRMatrixJ(As_FF_diag);
   As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag);
   As_FF_offd = hypre_ParCSRMatrixOffd(As_FF);
   As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd);
   As_FF_offd_j = hypre_CSRMatrixJ(As_FF_offd);
   As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd);
   n_Fpts = hypre_CSRMatrixNumRows(As_FF_diag);
   num_cols_A_FF_offd = hypre_CSRMatrixNumCols(As_FF_offd);
   D_beta = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
   D_lambda = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
   D_tmp = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
   D_tau = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
   D_w = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
   cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   start_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
   startf_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
   /* First parallel region: partition rows over threads, count C-points,
    * compute the weight vectors, exchange D_tmp for off-process columns, and
    * rescale As_FF / As_FC in place.  The barriers separate phases that read
    * data written by other threads in the previous phase — do not reorder. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,row,theta,value)
#endif
   {
      HYPRE_Int my_thread_num = hypre_GetThreadNum();
      /* Static block partition of the n_fine rows over threads. */
      start = (n_fine/num_threads)*my_thread_num;
      if (my_thread_num == num_threads-1)
      {
         stop = n_fine;
      }
      else
      {
         stop = (n_fine/num_threads)*(my_thread_num+1);
      }
      start_array[my_thread_num+1] = stop;
      /* Count C-points owned by this thread. */
      for (i=start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            cpt_array[my_thread_num]++;
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Serial prefix sum of per-thread C-point counts. */
      if (my_thread_num == 0)
      {
         for (i=1; i < num_threads; i++)
         {
            cpt_array[i] += cpt_array[i-1];
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Derive this thread's F-point row range from the C-point prefix sums. */
      if (my_thread_num > 0)
         startf = start - cpt_array[my_thread_num-1];
      else
         startf = 0;
      if (my_thread_num < num_threads-1)
         stopf = stop - cpt_array[my_thread_num];
      else
         stopf = n_Fpts;
      startf_array[my_thread_num+1] = stopf;
      /* D_lambda = average of off-diagonal F-F entries per row;
       * D_beta   = row sum of F-C entries;
       * D_tmp    = D_lambda / (D_lambda + D_beta)  (guarded against 0). */
      for (i=startf; i < stopf; i++)
      {
         HYPRE_Real number;
         for (j=As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
         {
            D_lambda[i] += As_FF_diag_data[j];
         }
         for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
         {
            D_lambda[i] += As_FF_offd_data[j];
         }
         number = (HYPRE_Real)(As_FF_diag_i[i+1]-As_FF_diag_i[i]-1+As_FF_offd_i[i+1]-As_FF_offd_i[i]);
         if (number) D_lambda[i] /= number;
         for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
         {
            D_beta[i] += As_FC_diag_data[j];
         }
         for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
         {
            D_beta[i] += As_FC_offd_data[j];
         }
         if (D_lambda[i]+D_beta[i]) D_tmp[i] = D_lambda[i]/(D_beta[i]+D_lambda[i]);
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Thread 0 exchanges D_tmp values for off-process F columns into
       * D_tmp_offd (blocking: handle destroyed immediately). */
      if (my_thread_num == 0)
      {
         if (num_cols_A_FF_offd)
         {
            D_tmp_offd = hypre_CTAlloc(HYPRE_Real, num_cols_A_FF_offd, memory_location_P);
         }
         index = 0;
         comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
         if (!comm_pkg)
         {
            hypre_MatvecCommPkgCreate(As_FF);
            comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
         }
         num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
         buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), memory_location_P);
         for (i = 0; i < num_sends; i++)
         {
            startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            {
               buf_data[index++] = D_tmp[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
         }
         comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_tmp_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* D_w = (full row sum of A) minus (strong F-F off-diagonals) minus
       * D_beta, accumulated per F-row (row tracks the F-row index of i). */
      row = startf;
      for (i=start; i < stop; i++)
      {
         if (CF_marker[i] < 0)
         {
            for (j=A_diag_i[i]; j < A_diag_i[i+1]; j++)
            {
               D_w[row] += A_diag_data[j];
            }
            for (j=A_offd_i[i]; j < A_offd_i[i+1]; j++)
            {
               D_w[row] += A_offd_data[j];
            }
            for (j=As_FF_diag_i[row]+1; j < As_FF_diag_i[row+1]; j++)
            {
               D_w[row] -= As_FF_diag_data[j];
            }
            for (j=As_FF_offd_i[row]; j < As_FF_offd_i[row+1]; j++)
            {
               D_w[row] -= As_FF_offd_data[j];
            }
            D_w[row] -= D_beta[row];
            row++;
         }
      }
      /* D_tau = sum over strong F-F neighbors k of a_ik * D_tmp[k]. */
      for (i=startf; i<stopf; i++)
      {
         for (j=As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
         {
            index = As_FF_diag_j[j];
            D_tau[i] += As_FF_diag_data[j]*D_tmp[index];
         }
         for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
         {
            index = As_FF_offd_j[j];
            D_tau[i] += As_FF_offd_data[j]*D_tmp_offd[index];
         }
      }
      /* Rescale: As_FF row i by value = -1/(D_w+D_tau) (diagonal entry set to
       * value*theta), As_FC row i by 1/theta with theta = D_beta+D_lambda. */
      for (i=startf; i<stopf; i++)
      {
         value = D_w[i]+D_tau[i];
         if (value) value = -1.0/value;
         theta = D_beta[i]+D_lambda[i];
         As_FF_diag_data[As_FF_diag_i[i]] = value*theta;
         if (theta) theta = 1.0/theta;
         for (j = As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
         {
            As_FF_diag_data[j] *= value;
         }
         for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
         {
            As_FF_offd_data[j] *= value;
         }
         for (j = As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
         {
            As_FC_diag_data[j] *= theta;
         }
         for (j = As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
         {
            As_FC_offd_data[j] *= theta;
         }
      }
   } /* end parallel region */
   /* W holds the F-point rows of P: W = (scaled As_FF) * (scaled As_FC). */
   W = hypre_ParMatmul(As_FF, As_FC);
   W_diag = hypre_ParCSRMatrixDiag(W);
   W_offd = hypre_ParCSRMatrixOffd(W);
   W_diag_i = hypre_CSRMatrixI(W_diag);
   W_diag_j = hypre_CSRMatrixJ(W_diag);
   W_diag_data = hypre_CSRMatrixData(W_diag);
   W_offd_i = hypre_CSRMatrixI(W_offd);
   W_offd_j = hypre_CSRMatrixJ(W_offd);
   W_offd_data = hypre_CSRMatrixData(W_offd);
   num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);
   /*-----------------------------------------------------------------------
    *  Intialize data for P
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
   /* P's diag gets one identity entry per C-point plus all of W's diag. */
   P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_Fpts];
   P_offd_size = hypre_CSRMatrixI(W_offd)[n_Fpts];
   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
      P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
   }
   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
      P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
   }
   /* Second parallel region: interleave identity rows (C-points) with W rows
    * (F-points), using the ranges/counts computed in the first region. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,c_pt,row,cnt_diag,cnt_offd)
#endif
   {
      HYPRE_Int my_thread_num = hypre_GetThreadNum();
      startf = startf_array[my_thread_num];
      stopf = startf_array[my_thread_num+1];
      start = start_array[my_thread_num];
      stop = start_array[my_thread_num+1];
      if (my_thread_num > 0)
         c_pt = cpt_array[my_thread_num-1];
      else
         c_pt = 0;
      cnt_diag = W_diag_i[startf]+c_pt;
      cnt_offd = W_offd_i[startf];
      row = startf;
      for (i=start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            /* C-point: single unit entry in the diag part. */
            P_diag_j[cnt_diag] = c_pt++;
            P_diag_data[cnt_diag++] = 1.0;
         }
         else
         {
            /* F-point: copy the corresponding row of W. */
            for (j=W_diag_i[row]; j < W_diag_i[row+1]; j++)
            {
               P_diag_j[cnt_diag] = W_diag_j[j];
               P_diag_data[cnt_diag++] = W_diag_data[j];
            }
            for (j=W_offd_i[row]; j < W_offd_i[row+1]; j++)
            {
               P_offd_j[cnt_offd] = W_offd_j[j];
               P_offd_data[cnt_offd++] = W_offd_data[j];
            }
            row++;
         }
         P_diag_i[i+1] = cnt_diag;
         P_offd_i[i+1] = cnt_offd;
      }
   } /* end parallel region */
   /*-----------------------------------------------------------------------
    *  Create matrix
    *-----------------------------------------------------------------------*/
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                num_cols_P_offd,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
   /* P steals W's off-diagonal column map (ownership transferred). */
   hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W);
   hypre_ParCSRMatrixColMapOffd(W) = NULL;
   hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      HYPRE_Int *map;
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation may reallocate P's arrays; re-fetch all pointers. */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
      col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
      /* Drop off-diagonal columns no longer referenced after truncation and
       * renumber P_offd_j against the compressed column map. */
      if (num_cols_P_offd)
      {
         P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
         for (i=0; i < P_offd_size; i++)
            P_marker[P_offd_j[i]] = 1;
         new_ncols_P_offd = 0;
         for (i=0; i < num_cols_P_offd; i++)
            if (P_marker[i]) new_ncols_P_offd++;
         new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST);
         map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST);
         index = 0;
         for (i=0; i < num_cols_P_offd; i++)
            if (P_marker[i])
            {
               new_col_map_offd[index] = col_map_offd_P[i];
               map[index++] = i;
            }
         hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < P_offd_size; i++)
         {
            P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i],
                                             new_ncols_P_offd);
         }
         hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
         hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
         hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
         hypre_TFree(map, HYPRE_MEMORY_HOST);
      }
   }
   hypre_MatvecCommPkgCreate(P);
   *P_ptr = P;
   /* Deallocate memory */
   hypre_TFree(D_tmp, memory_location_P);
   hypre_TFree(D_tmp_offd, memory_location_P);
   hypre_TFree(D_w, memory_location_P);
   hypre_TFree(D_tau, memory_location_P);
   hypre_TFree(D_beta, memory_location_P);
   hypre_TFree(D_lambda, memory_location_P);
   hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(start_array, HYPRE_MEMORY_HOST);
   hypre_TFree(startf_array, HYPRE_MEMORY_HOST);
   hypre_TFree(buf_data, memory_location_P);
   hypre_ParCSRMatrixDestroy(As_FF);
   hypre_ParCSRMatrixDestroy(As_FC);
   hypre_ParCSRMatrixDestroy(W);
   return hypre_error_flag;
}
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp64_fp64
// op(A') function: GB_tran__ainv_fp64_fp64
// C type: double
// A type: double
// cast: double cij = (double) aij
// unaryop: cij = -aij
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -Ax [p] for all p (additive inverse, fp64 -> fp64).
// GB_CAST_OP, GB_DISABLE, etc. are the macros defined earlier in this
// (auto-generated) file; do not edit the generated token stream.
GrB_Info GB_unop__ainv_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // OpenMP thread count for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // one flat, embarrassingly parallel pass over the entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -A' : transpose A and negate each entry (phase 2 of the two-phase
// transpose).  The actual loop body lives in the shared template
// GB_unaryop_transpose.c, specialized by the macros defined above.
GrB_Info GB_tran__ainv_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
/*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_UTILS_COMMON_FUN_H_
#define LIGHTGBM_UTILS_COMMON_FUN_H_
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <limits>
#include <string>
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <iomanip>
#include <iterator>
#include <map>
#include <memory>
#include <sstream>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#include <mimalloc.h>
#ifdef _MSC_VER
#include <intrin.h>
#pragma intrinsic(_BitScanReverse)
#endif
#if defined(_MSC_VER)
#include <malloc.h>
#elif MM_MALLOC
#include <mm_malloc.h>
// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
// https://www.oreilly.com/library/view/mac-os-x/0596003560/ch05s01s02.html
#elif defined(__GNUC__) && defined(HAVE_MALLOC_H)
#include <malloc.h>
#define _mm_malloc(a, b) memalign(b, a)
#define _mm_free(a) free(a)
#else
#include <stdlib.h>
#define _mm_malloc(a, b) malloc(a)
#define _mm_free(a) free(a)
#endif
namespace LightGBM
{
namespace Common
{
// ASCII-only lower-casing; every byte outside 'A'..'Z' passes through
// unchanged (locale-independent, unlike std::tolower).
inline static char tolower(char in)
{
  return (in >= 'A' && in <= 'Z') ? static_cast<char>(in - ('Z' - 'z')) : in;
}
// Strips leading and trailing ASCII whitespace (space, \f, \n, \r, \t, \v)
// from a copy of the input and returns it.
inline static std::string Trim(std::string str)
{
  if (!str.empty())
  {
    str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1);
    str.erase(0, str.find_first_not_of(" \f\n\r\t\v"));
  }
  return str;
}
// Strips leading and trailing single/double quote characters from a copy of
// the input and returns it.
inline static std::string RemoveQuotationSymbol(std::string str)
{
  if (!str.empty())
  {
    str.erase(str.find_last_not_of("'\"") + 1);
    str.erase(0, str.find_first_not_of("'\""));
  }
  return str;
}
// Returns true iff `str` begins with `prefix`.
// Idiom fix: return the comparison directly instead of if/else-return-bool,
// and use compare(0, n, prefix) so no temporary substring is allocated
// (the original substr(0, n) copied up to prefix.size() characters).
inline static bool StartsWith(const std::string &str, const std::string prefix)
{
  return str.compare(0, prefix.size(), prefix) == 0;
}
// Splits `c_str` on `delimiter`, discarding empty segments (consecutive
// delimiters and leading/trailing delimiters produce nothing).
inline static std::vector<std::string, mi_stl_allocator<std::string>> Split(const char *c_str, char delimiter)
{
  std::vector<std::string, mi_stl_allocator<std::string>> ret;
  const std::string s(c_str);
  size_t begin = 0;
  for (size_t cur = 0; cur < s.length(); ++cur)
  {
    if (s[cur] == delimiter)
    {
      if (begin < cur)
      {
        ret.push_back(s.substr(begin, cur - begin));
      }
      begin = cur + 1;  // next segment starts after the delimiter
    }
  }
  if (begin < s.length())
  {
    ret.push_back(s.substr(begin));  // trailing segment, if non-empty
  }
  return ret;
}
// Extracts the substrings enclosed between left_delimiter and right_delimiter,
// e.g. "[a][b]" with '[' and ']' yields {"a", "b"}.  Empty bracket pairs are
// skipped.  NOTE(review): a nested left delimiter resets the segment start
// (innermost content wins) and an unmatched trailing left delimiter discards
// the tail — presumably acceptable for the config strings this parses; confirm
// against callers.
inline static std::vector<std::string, mi_stl_allocator<std::string>> SplitBrackets(const char *c_str, char left_delimiter, char right_delimiter)
{
  std::vector<std::string, mi_stl_allocator<std::string>> ret;
  std::string str(c_str);
  size_t i = 0;    // start of the current bracketed segment
  size_t pos = 0;  // scan cursor
  bool open = false;  // true while inside an (unclosed) left delimiter
  while (pos < str.length())
  {
    if (str[pos] == left_delimiter)
    {
      open = true;
      ++pos;
      i = pos;
    }
    else if (str[pos] == right_delimiter && open)
    {
      if (i < pos)
      {
        ret.push_back(str.substr(i, pos - i));
      }
      open = false;
      ++pos;
    }
    else
    {
      ++pos;
    }
  }
  return ret;
}
// Splits `c_str` into lines, treating any run of '\n'/'\r' as a single
// separator and discarding empty lines.
inline static std::vector<std::string, mi_stl_allocator<std::string>> SplitLines(const char *c_str)
{
  std::vector<std::string, mi_stl_allocator<std::string>> ret;
  const std::string s(c_str);
  size_t begin = 0;
  size_t cur = 0;
  while (cur < s.length())
  {
    if (s[cur] == '\n' || s[cur] == '\r')
    {
      if (begin < cur)
      {
        ret.push_back(s.substr(begin, cur - begin));
      }
      // consume the whole run of line-ending characters;
      // s[s.size()] is the guaranteed '\0', which stops the scan
      while (s[cur] == '\n' || s[cur] == '\r')
      {
        ++cur;
      }
      begin = cur;
    }
    else
    {
      ++cur;
    }
  }
  if (begin < cur)
  {
    ret.push_back(s.substr(begin));  // final unterminated line
  }
  return ret;
}
// Splits `c_str` on any character present in the NUL-terminated `delimiters`
// set, discarding empty segments.
inline static std::vector<std::string, mi_stl_allocator<std::string>> Split(const char *c_str, const char *delimiters)
{
  std::vector<std::string, mi_stl_allocator<std::string>> ret;
  const std::string s(c_str);
  size_t begin = 0;
  size_t cur = 0;
  while (cur < s.length())
  {
    // does the current character belong to the delimiter set?
    bool is_delim = false;
    for (const char *d = delimiters; *d != '\0'; ++d)
    {
      if (s[cur] == *d)
      {
        is_delim = true;
        break;
      }
    }
    if (is_delim)
    {
      if (begin < cur)
      {
        ret.push_back(s.substr(begin, cur - begin));
      }
      ++cur;
      begin = cur;
    }
    else
    {
      ++cur;
    }
  }
  if (begin < cur)
  {
    ret.push_back(s.substr(begin));
  }
  return ret;
}
// Parses a base-10 signed integer of type T from `p`, skipping leading and
// trailing spaces (space only, not tabs), and returns a pointer just past the
// consumed text.  Unparseable input yields 0.
// NOTE(review): no overflow detection — values wider than T silently wrap;
// presumably inputs are pre-validated upstream.
template <typename T>
inline static const char *Atoi(const char *p, T *out)
{
  int sign;
  T value;
  while (*p == ' ')
  {
    ++p;
  }
  // optional sign
  sign = 1;
  if (*p == '-')
  {
    sign = -1;
    ++p;
  }
  else if (*p == '+')
  {
    ++p;
  }
  // accumulate decimal digits
  for (value = 0; *p >= '0' && *p <= '9'; ++p)
  {
    value = value * 10 + (*p - '0');
  }
  *out = static_cast<T>(sign * value);
  while (*p == ' ')
  {
    ++p;
  }
  return p;
}
// Integer exponentiation by squaring (and cubing), returning double.
// Negative powers recurse via the reciprocal; Pow(x, 0) == 1 for any x.
template <typename T>
inline static double Pow(T base, int power)
{
  if (power < 0)
  {
    return 1.0 / Pow(base, -power);
  }
  else if (power == 0)
  {
    return 1;
  }
  else if (power % 2 == 0)
  {
    // even: square the base, halve the exponent
    return Pow(base * base, power / 2);
  }
  else if (power % 3 == 0)
  {
    // odd multiple of 3: cube the base, divide the exponent by 3
    return Pow(base * base * base, power / 3);
  }
  else
  {
    // odd: peel off one factor of base
    return base * Pow(base, power - 1);
  }
}
// Hand-rolled, locale-independent double parser.  Skips leading/trailing
// spaces, accepts [+-]digits[.digits][eE[+-]digits], recognizes the tokens
// "na"/"nan"/"null" (-> NAN) and "inf"/"infinity" (-> +/-1e308), and aborts
// via Log::Fatal on any other token.  Returns a pointer just past the
// consumed text; *out receives the value (NAN if nothing was parsed).
// NOTE(review): exponents are clamped to 308 and infinity is represented as
// 1e308 rather than HUGE_VAL — presumably intentional to keep values finite.
inline static const char *Atof(const char *p, double *out)
{
  int frac;
  double sign, value, scale;
  *out = NAN;
  // Skip leading white space, if any.
  while (*p == ' ')
  {
    ++p;
  }
  // Get sign, if any.
  sign = 1.0;
  if (*p == '-')
  {
    sign = -1.0;
    ++p;
  }
  else if (*p == '+')
  {
    ++p;
  }
  // is a number
  if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E')
  {
    // Get digits before decimal point or exponent, if any.
    for (value = 0.0; *p >= '0' && *p <= '9'; ++p)
    {
      value = value * 10.0 + (*p - '0');
    }
    // Get digits after decimal point, if any.
    if (*p == '.')
    {
      double right = 0.0;
      int nn = 0;  // number of fractional digits
      ++p;
      while (*p >= '0' && *p <= '9')
      {
        right = (*p - '0') + right * 10.0;
        ++nn;
        ++p;
      }
      value += right / Pow(10.0, nn);
    }
    // Handle exponent, if any.
    frac = 0;  // 1 when the exponent is negative (divide instead of multiply)
    scale = 1.0;
    if ((*p == 'e') || (*p == 'E'))
    {
      uint32_t expon;
      // Get sign of exponent, if any.
      ++p;
      if (*p == '-')
      {
        frac = 1;
        ++p;
      }
      else if (*p == '+')
      {
        ++p;
      }
      // Get digits of exponent, if any.
      for (expon = 0; *p >= '0' && *p <= '9'; ++p)
      {
        expon = expon * 10 + (*p - '0');
      }
      if (expon > 308)
        expon = 308;
      // Calculate scaling factor.
      while (expon >= 50)
      {
        scale *= 1E50;
        expon -= 50;
      }
      while (expon >= 8)
      {
        scale *= 1E8;
        expon -= 8;
      }
      while (expon > 0)
      {
        scale *= 10.0;
        expon -= 1;
      }
    }
    // Return signed and scaled floating point result.
    *out = sign * (frac ? (value / scale) : (value * scale));
  }
  else
  {
    // Not numeric: collect the token up to the next field separator.
    size_t cnt = 0;
    while (*(p + cnt) != '\0' && *(p + cnt) != ' ' && *(p + cnt) != '\t' && *(p + cnt) != ',' && *(p + cnt) != '\n' && *(p + cnt) != '\r' && *(p + cnt) != ':')
    {
      ++cnt;
    }
    if (cnt > 0)
    {
      std::string tmp_str(p, cnt);
      std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower);
      if (tmp_str == std::string("na") || tmp_str == std::string("nan") ||
          tmp_str == std::string("null"))
      {
        *out = NAN;
      }
      else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity"))
      {
        *out = sign * 1e308;
      }
      else
      {
        Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
      }
      p += cnt;
    }
  }
  while (*p == ' ')
  {
    ++p;
  }
  return p;
}
// Parses an int from `p` into *out and returns true only if the whole string
// was consumed (Atoi also eats surrounding spaces).
// Idiom fix: return the comparison directly instead of if-return-false-return-true.
inline static bool AtoiAndCheck(const char *p, int *out)
{
  const char *after = Atoi(p, out);
  return *after == '\0';
}
// Parses a double from `p` into *out and returns true only if the whole
// string was consumed (Atof also eats surrounding spaces).
// Idiom fix: return the comparison directly instead of if-return-false-return-true.
inline static bool AtofAndCheck(const char *p, double *out)
{
  const char *after = Atof(p, out);
  return *after == '\0';
}
// Returns the number of decimal digits needed to print `n` (1..10).
// On MSVC/GCC uses the bit-scan trick: floor(log10(n)) ~= msb * 1233 >> 12,
// corrected by comparing against the power-of-10 table; otherwise falls back
// to a comparison ladder.  `n | 1` avoids the undefined clz/BitScan of 0.
inline static unsigned CountDecimalDigit32(uint32_t n)
{
#if defined(_MSC_VER) || defined(__GNUC__)
  static const uint32_t powers_of_10[] = {
      0,
      10,
      100,
      1000,
      10000,
      100000,
      1000000,
      10000000,
      100000000,
      1000000000};
#ifdef _MSC_VER
  // NOLINTNEXTLINE
  unsigned long i = 0;
  _BitScanReverse(&i, n | 1);
  uint32_t t = (i + 1) * 1233 >> 12;
#elif __GNUC__
  uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12;
#endif
  // t may overestimate by one; the table lookup corrects it
  return t - (n < powers_of_10[t]) + 1;
#else
  if (n < 10)
    return 1;
  if (n < 100)
    return 2;
  if (n < 1000)
    return 3;
  if (n < 10000)
    return 4;
  if (n < 100000)
    return 5;
  if (n < 1000000)
    return 6;
  if (n < 10000000)
    return 7;
  if (n < 100000000)
    return 8;
  if (n < 1000000000)
    return 9;
  return 10;
#endif
}
// Writes the decimal representation of `value` into `buffer` (NUL-terminated),
// emitting two digits at a time from a 00..99 lookup table, filling the buffer
// back-to-front.  The caller must provide at least
// CountDecimalDigit32(value) + 1 bytes — no bounds are checked here.
inline static void Uint32ToStr(uint32_t value, char *buffer)
{
  // kDigitsLut[2*i], kDigitsLut[2*i+1] are the two ASCII digits of i (00..99)
  const char kDigitsLut[200] = {
      '0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9',
      '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9',
      '2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9',
      '3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9',
      '4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9',
      '5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9',
      '6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9',
      '7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9',
      '8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9',
      '9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9'};
  unsigned digit = CountDecimalDigit32(value);
  buffer += digit;
  *buffer = '\0';
  // peel off two digits per iteration while value has three or more
  while (value >= 100)
  {
    const unsigned i = (value % 100) << 1;
    value /= 100;
    *--buffer = kDigitsLut[i + 1];
    *--buffer = kDigitsLut[i];
  }
  // handle the remaining one or two leading digits
  if (value < 10)
  {
    *--buffer = static_cast<char>(value) + '0';
  }
  else
  {
    const unsigned i = value << 1;
    *--buffer = kDigitsLut[i + 1];
    *--buffer = kDigitsLut[i];
  }
}
// Writes the decimal representation of a signed 32-bit value into `buffer`.
// Negation is done on the unsigned image (~u + 1), so INT32_MIN is handled
// without signed overflow.
inline static void Int32ToStr(int32_t value, char *buffer)
{
  uint32_t magnitude = static_cast<uint32_t>(value);
  if (value < 0)
  {
    *buffer++ = '-';
    magnitude = ~magnitude + 1;  // two's-complement negate
  }
  Uint32ToStr(magnitude, buffer);
}
// Formats `value` with 17 significant digits (round-trip precision for IEEE
// double) into `buffer`.  CHECK_GE aborts on encoding errors; NOTE(review):
// output longer than buffer_len is truncated by snprintf without any error,
// so buffer_len should be >= ~25 bytes — confirm callers' buffer sizes.
inline static void DoubleToStr(double value, char *buffer, size_t buffer_len)
{
#ifdef _MSC_VER
  int num_chars = sprintf_s(buffer, buffer_len, "%.17g", value);
#else
  int num_chars = snprintf(buffer, buffer_len, "%.17g", value);
#endif
  CHECK_GE(num_chars, 0);
}
// Advances past any run of spaces and tabs; returns the first other character.
inline static const char *SkipSpaceAndTab(const char *p)
{
  for (; *p == ' ' || *p == '\t'; ++p)
  {
  }
  return p;
}
// Advances past any run of newlines, carriage returns, and spaces
// (note: spaces too, despite the name); returns the first other character.
inline static const char *SkipReturn(const char *p)
{
  for (; *p == '\n' || *p == '\r' || *p == ' '; ++p)
  {
  }
  return p;
}
// Element-wise static_cast of a vector<T> to a vector<T2>.
template <typename T, typename T2>
inline static std::vector<T2, mi_stl_allocator<T2>> ArrayCast(const std::vector<T, mi_stl_allocator<T>> &arr)
{
  std::vector<T2, mi_stl_allocator<T2>> out(arr.size());
  std::transform(arr.begin(), arr.end(), out.begin(),
                 [](const T &v) { return static_cast<T2>(v); });
  return out;
}
// Fast number-to-string functor, specialized on (is_float, is_unsigned).
// Primary template: signed integers via Int32ToStr.
// NOTE(review): 64-bit integer types would be narrowed to 32 bits here —
// presumably only 32-bit T reaches this path; confirm instantiations.
template <typename T, bool is_float, bool is_unsign>
struct __TToStringHelperFast
{
  void operator()(T value, char *buffer, size_t) const
  {
    Int32ToStr(value, buffer);
  }
};
// Floating-point specialization: "%g" formatting (shortest form, ~6 digits).
template <typename T>
struct __TToStringHelperFast<T, true, false>
{
  void operator()(T value, char *buffer, size_t buf_len)
      const
  {
#ifdef _MSC_VER
    int num_chars = sprintf_s(buffer, buf_len, "%g", value);
#else
    int num_chars = snprintf(buffer, buf_len, "%g", value);
#endif
    CHECK_GE(num_chars, 0);
  }
};
// Unsigned-integer specialization: Uint32ToStr.
template <typename T>
struct __TToStringHelperFast<T, false, true>
{
  void operator()(T value, char *buffer, size_t) const
  {
    Uint32ToStr(value, buffer);
  }
};
// Joins the first min(n, arr.size()) elements with single spaces using the
// fast per-type formatter.  Returns "" for empty input or n == 0.
// NOTE(review): the 16-byte scratch buffer is passed to the integer
// formatters, which do not bounds-check — fine for 32-bit values (max 11
// chars + NUL); confirm no wider types are instantiated.
template <typename T>
inline static std::string ArrayToStringFast(const std::vector<T, mi_stl_allocator<T>> &arr, size_t n)
{
  if (arr.empty() || n == 0)
  {
    return std::string("");
  }
  __TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper;
  const size_t buf_len = 16;
  std::vector<char, mi_stl_allocator<char>> buffer(buf_len);
  std::stringstream str_buf;
  helper(arr[0], buffer.data(), buf_len);
  str_buf << buffer.data();
  for (size_t i = 1; i < std::min(n, arr.size()); ++i)
  {
    helper(arr[i], buffer.data(), buf_len);
    str_buf << ' ' << buffer.data();
  }
  return str_buf.str();
}
// Joins the first min(n, arr.size()) doubles with single spaces, formatting
// each with full round-trip precision (%.17g via DoubleToStr).  Returns ""
// for empty input or n == 0.
inline static std::string ArrayToString(const std::vector<double, mi_stl_allocator<double>> &arr, size_t n)
{
  if (arr.empty() || n == 0)
  {
    return std::string("");
  }
  const size_t buf_len = 32;  // enough for %.17g output (<= ~25 chars)
  std::vector<char, mi_stl_allocator<char>> buffer(buf_len);
  std::stringstream str_buf;
  DoubleToStr(arr[0], buffer.data(), buf_len);
  str_buf << buffer.data();
  for (size_t i = 1; i < std::min(n, arr.size()); ++i)
  {
    DoubleToStr(arr[i], buffer.data(), buf_len);
    str_buf << ' ' << buffer.data();
  }
  return str_buf.str();
}
// String-to-number functor, specialized on is_float.
// Integer version: Common::Atoi (never throws; returns 0 on garbage).
template <typename T, bool is_float>
struct __StringToTHelper
{
  T operator()(const std::string &str) const
  {
    T ret = 0;
    Atoi(str.c_str(), &ret);
    return ret;
  }
};
// Floating-point version: std::stod.
// NOTE(review): unlike the integer path, std::stod throws on invalid input
// and is locale-sensitive for some implementations — confirm callers expect
// this asymmetry.
template <typename T>
struct __StringToTHelper<T, true>
{
  T operator()(const std::string &str) const
  {
    return static_cast<T>(std::stod(str));
  }
};
// Splits `str` on `delimiter` and converts each non-empty token to T.
template <typename T>
inline static std::vector<T, mi_stl_allocator<T>> StringToArray(const std::string &str, char delimiter)
{
  __StringToTHelper<T, std::is_floating_point<T>::value> convert;
  std::vector<std::string, mi_stl_allocator<std::string>> tokens = Split(str.c_str(), delimiter);
  std::vector<T, mi_stl_allocator<T>> out;
  out.reserve(tokens.size());
  for (const auto &token : tokens)
  {
    out.push_back(convert(token));
  }
  return out;
}
// Parses a string like "[1,2][3,4]" into a vector of vectors: each bracketed
// group is split on `delimiter` and converted to T.
template <typename T>
inline static std::vector<std::vector<T, mi_stl_allocator<T>>, mi_stl_allocator<std::vector<T, mi_stl_allocator<T>>>> StringToArrayofArrays(
    const std::string &str, char left_bracket, char right_bracket, char delimiter)
{
  std::vector<std::vector<T, mi_stl_allocator<T>>, mi_stl_allocator<std::vector<T, mi_stl_allocator<T>>>> out;
  for (const auto &group : SplitBrackets(str.c_str(), left_bracket, right_bracket))
  {
    out.push_back(StringToArray<T>(group, delimiter));
  }
  return out;
}
// Splits `str` on spaces and converts to exactly n values of T; aborts via
// CHECK_EQ if the token count differs.  n == 0 yields an empty vector.
template <typename T>
inline static std::vector<T, mi_stl_allocator<T>> StringToArray(const std::string &str, int n)
{
  if (n == 0)
  {
    return std::vector<T, mi_stl_allocator<T>>();
  }
  std::vector<std::string, mi_stl_allocator<std::string>> tokens = Split(str.c_str(), ' ');
  CHECK_EQ(tokens.size(), static_cast<size_t>(n));
  __StringToTHelper<T, std::is_floating_point<T>::value> convert;
  std::vector<T, mi_stl_allocator<T>> out;
  out.reserve(tokens.size());
  for (const auto &token : tokens)
  {
    out.push_back(convert(token));
  }
  return out;
}
// In-place parsing functor returning the advanced cursor, specialized on
// is_float.  Integer version: Common::Atoi.
template <typename T, bool is_float>
struct __StringToTHelperFast
{
  const char *operator()(const char *p, T *out) const
  {
    return Atoi(p, out);
  }
};
// Floating-point version: Common::Atof into a double, then narrowed to T.
template <typename T>
struct __StringToTHelperFast<T, true>
{
  const char *operator()(const char *p, T *out) const
  {
    double tmp = 0.0f;
    auto ret = Atof(p, &tmp);
    *out = static_cast<T>(tmp);
    return ret;
  }
};
// Parses exactly n whitespace-separated values of T from `str` using the
// cursor-based fast parsers (no per-token std::string allocations).
// The caller guarantees the string contains at least n values.
template <typename T>
inline static std::vector<T, mi_stl_allocator<T>> StringToArrayFast(const std::string &str, int n)
{
  if (n == 0)
  {
    return std::vector<T, mi_stl_allocator<T>>();
  }
  __StringToTHelperFast<T, std::is_floating_point<T>::value> convert;
  std::vector<T, mi_stl_allocator<T>> out(n);
  const char *cursor = str.c_str();
  for (int i = 0; i < n; ++i)
  {
    cursor = convert(cursor, &out[i]);
  }
  return out;
}
// Joins the elements of `strs` with `delimiter`, streaming each element with
// enough precision to round-trip doubles.  Returns "" for an empty vector.
template <typename T>
inline static std::string Join(const std::vector<T, mi_stl_allocator<T>> &strs, const char *delimiter)
{
  if (strs.empty())
  {
    return std::string("");
  }
  std::stringstream buf;
  buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  buf << strs[0];
  for (size_t i = 1; i < strs.size(); ++i)
  {
    buf << delimiter << strs[i];
  }
  return buf.str();
}
// int8_t specialization: widen to int16_t before streaming so values print
// as numbers rather than raw characters.
template <>
inline std::string Join<int8_t>(const std::vector<int8_t, mi_stl_allocator<int8_t>> &strs, const char *delimiter)
{
  if (strs.empty())
  {
    return std::string("");
  }
  std::stringstream buf;
  buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  buf << static_cast<int16_t>(strs[0]);
  for (size_t i = 1; i < strs.size(); ++i)
  {
    buf << delimiter << static_cast<int16_t>(strs[i]);
  }
  return buf.str();
}
// Joins the elements in the half-open index range [start, end) with
// `delimiter`; out-of-range bounds are clamped to the vector size.
//
// Bug fix: start and end are unsigned (size_t), so the original guard
// `end - start <= 0` underflowed to a huge positive value whenever
// end < start and therefore only caught end == start. Likewise
// `strs.size() - 1` underflowed for an empty vector. Both degenerate
// cases now return "" explicitly.
template <typename T>
inline static std::string Join(const std::vector<T, mi_stl_allocator<T>> &strs, size_t start, size_t end, const char *delimiter)
{
    if (strs.empty() || end <= start)
    {
        return std::string("");
    }
    start = std::min(start, static_cast<size_t>(strs.size()) - 1);
    end = std::min(end, static_cast<size_t>(strs.size()));
    std::stringstream str_buf;
    str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
    str_buf << strs[start];
    for (size_t i = start + 1; i < end; ++i)
    {
        str_buf << delimiter;
        str_buf << strs[i];
    }
    return str_buf.str();
}
// Returns the smallest power of two that is >= x (1 for x <= 1), or 0
// when no int64_t power of two is large enough (x > 2^62).
//
// Bug fix: the original loop shifted `t` up to 63 times, eventually
// shifting a one bit into the sign bit of a signed int64_t — undefined
// behavior. The loop now stops before the shift can overflow; the
// result for all well-defined inputs is unchanged.
inline static int64_t Pow2RoundUp(int64_t x)
{
    int64_t t = 1;
    for (int i = 0; i < 63; ++i)
    {
        if (t >= x)
        {
            return t;
        }
        if (t > (INT64_MAX >> 1))
        {
            break; // doubling again would overflow the sign bit
        }
        t <<= 1;
    }
    return 0;
}
/*!
 * \brief Do inplace softmax transformation on p_rec, numerically
 *        stabilized by subtracting the maximum before exponentiation.
 * \param p_rec The input/output vector of the values (must be non-empty).
 */
inline static void Softmax(std::vector<double, mi_stl_allocator<double>> *p_rec)
{
    auto &values = *p_rec;
    const size_t count = values.size();
    double max_value = values[0];
    for (size_t idx = 1; idx < count; ++idx)
    {
        if (values[idx] > max_value)
        {
            max_value = values[idx];
        }
    }
    double total = 0.0;
    for (size_t idx = 0; idx < count; ++idx)
    {
        values[idx] = std::exp(values[idx] - max_value);
        total += values[idx];
    }
    for (size_t idx = 0; idx < count; ++idx)
    {
        values[idx] /= total;
    }
}
// Softmax of input[0..len) written to output[0..len), numerically
// stabilized by shifting by the maximum input value. input and output
// may alias.
//
// Bug fix: a non-positive len previously read input[0] out of bounds
// (undefined behavior); such calls are now a no-op.
inline static void Softmax(const double *input, double *output, int len)
{
    if (len <= 0)
    {
        return;
    }
    double wmax = input[0];
    for (int i = 1; i < len; ++i)
    {
        wmax = std::max(input[i], wmax);
    }
    double wsum = 0.0;
    for (int i = 0; i < len; ++i)
    {
        output[i] = std::exp(input[i] - wmax);
        wsum += output[i];
    }
    for (int i = 0; i < len; ++i)
    {
        output[i] /= wsum;
    }
}
// Builds a vector of raw const pointers viewing the objects owned by a
// vector of unique_ptr. Ownership is NOT transferred; the result must
// not outlive `input`.
template <typename T>
std::vector<const T *, mi_stl_allocator<const T *>> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>, mi_stl_allocator<std::unique_ptr<T>>> &input)
{
    std::vector<const T *, mi_stl_allocator<const T *>> ret;
    ret.reserve(input.size());
    for (const auto &owner : input)
    {
        ret.push_back(owner.get());
    }
    return ret;
}
// Stable-sorts keys[start..] (ascending, or descending when is_reverse
// is true) and applies the same permutation to values[start..].
//
// Bug fix: the temporary pair array `arr` is indexed from 0 (it holds
// only the elements from `start` on), but the original write-back loop
// read arr[i] with i beginning at `start` — so for any start > 0 the
// sorted data was copied back misaligned and the tail of the range was
// never written. The write-back now maps arr[j] to position start + j.
template <typename T1, typename T2>
inline static void SortForPair(std::vector<T1, mi_stl_allocator<T1>> *keys, std::vector<T2, mi_stl_allocator<T2>> *values, size_t start, bool is_reverse = false)
{
    std::vector<std::pair<T1, T2>, mi_stl_allocator<std::pair<T1, T2>>> arr;
    auto &ref_key = *keys;
    auto &ref_value = *values;
    for (size_t i = start; i < keys->size(); ++i)
    {
        arr.emplace_back(ref_key[i], ref_value[i]);
    }
    if (!is_reverse)
    {
        std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2> &a, const std::pair<T1, T2> &b) {
            return a.first < b.first;
        });
    }
    else
    {
        std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2> &a, const std::pair<T1, T2> &b) {
            return a.first > b.first;
        });
    }
    // arr[j] corresponds to original index start + j.
    for (size_t j = 0; j < arr.size(); ++j)
    {
        ref_key[start + j] = arr[j].first;
        ref_value[start + j] = arr[j].second;
    }
}
// Collects a mutable data() pointer for each inner vector. The pointers
// are invalidated by any reallocation of the inner vectors.
template <typename T>
inline static std::vector<T *, mi_stl_allocator<T *>> Vector2Ptr(std::vector<std::vector<T, mi_stl_allocator<T>>, mi_stl_allocator<std::vector<T, mi_stl_allocator<T>>>> *data)
{
    auto &rows = *data;
    std::vector<T *, mi_stl_allocator<T *>> ptr(rows.size());
    size_t idx = 0;
    for (auto &row : rows)
    {
        ptr[idx++] = row.data();
    }
    return ptr;
}
// Returns the length of every inner vector, truncated to int.
template <typename T>
inline static std::vector<int, mi_stl_allocator<int>> VectorSize(const std::vector<std::vector<T, mi_stl_allocator<T>>, mi_stl_allocator<std::vector<T, mi_stl_allocator<T>>>> &data)
{
    std::vector<int, mi_stl_allocator<int>> ret;
    ret.reserve(data.size());
    for (const auto &row : data)
    {
        ret.push_back(static_cast<int>(row.size()));
    }
    return ret;
}
// Replaces NaN with 0 and clamps the value to [-1e300, 1e300] so that
// downstream arithmetic cannot produce infinities.
inline static double AvoidInf(double x)
{
    if (std::isnan(x))
    {
        return 0.0;
    }
    if (x >= 1e300)
    {
        return 1e300;
    }
    if (x <= -1e300)
    {
        return -1e300;
    }
    return x;
}
// float overload: NaN becomes 0 and the value is clamped to
// [-1e38f, 1e38f].
inline static float AvoidInf(float x)
{
    if (std::isnan(x))
    {
        return 0.0f;
    }
    if (x >= 1e38)
    {
        return 1e38f;
    }
    if (x <= -1e38)
    {
        return -1e38f;
    }
    return x;
}
// Returns a null pointer typed as the iterator's value_type; used only
// to communicate the element type to ParallelSort below. The pointer is
// never dereferenced.
template <typename _Iter>
inline static typename std::iterator_traits<_Iter>::value_type *IteratorValType(_Iter)
{
    return (0);
}
// Parallel merge sort: the range is cut into per-thread chunks that are
// std::sort-ed independently, then merged pairwise in log(#chunks)
// passes. Falls back to a plain std::sort for small inputs or a single
// thread. The trailing _VTRanIt* argument only carries the value type.
template <typename _RanIt, typename _Pr, typename _VTRanIt>
inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt *)
{
    size_t len = _Last - _First;
    const size_t kMinInnerLen = 1024;  // below this, threading overhead dominates
    int num_threads = OMP_NUM_THREADS();
    if (len <= kMinInnerLen || num_threads <= 1)
    {
        std::sort(_First, _Last, _Pred);
        return;
    }
    // Chunk size per thread, at least kMinInnerLen elements.
    size_t inner_size = (len + num_threads - 1) / num_threads;
    inner_size = std::max(inner_size, kMinInnerLen);
    num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
    // Phase 1: sort each chunk independently.
    #pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < num_threads; ++i)
    {
        size_t left = inner_size * i;
        size_t right = left + inner_size;
        right = std::min(right, len);
        if (right > left)
        {
            std::sort(_First + left, _First + right, _Pred);
        }
    }
    // Buffer for merge.
    // NOTE(review): a vector iterator is assigned to _RanIt — this only
    // compiles when _RanIt is a compatible (pointer-like) iterator type;
    // confirm against the call sites.
    std::vector<_VTRanIt, mi_stl_allocator<_VTRanIt>> temp_buf(len);
    _RanIt buf = temp_buf.begin();
    size_t s = inner_size;
    // Recursive merge: each pass merges adjacent runs of length s.
    while (s < len)
    {
        int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
        #pragma omp parallel for schedule(static, 1)
        for (int i = 0; i < loop_size; ++i)
        {
            size_t left = i * 2 * s;
            size_t mid = left + s;
            size_t right = mid + s;
            right = std::min(len, right);
            if (mid >= right)
            {
                continue;  // odd tail run: nothing to merge this pass
            }
            // Copy the left run aside, then merge with the right run
            // back into the original range.
            std::copy(_First + left, _First + mid, buf + left);
            std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
        }
        s *= 2;
    }
}
// Convenience overload that deduces the value type via IteratorValType.
template <typename _RanIt, typename _Pr>
inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred)
{
    ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}
// Check that all y[] are in interval [ymin, ymax] (end points included);
// throws a fatal error naming the first offending element if not.
// Elements are scanned in pairs: ordering each pair first means only the
// pair's smaller value needs the ymin check and only the larger one the
// ymax check (fewer comparisons than checking both bounds per element).
template <typename T>
inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername)
{
    // Defers the formatted fatal log; %s/%i are filled in by Log::Fatal.
    auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) {
        std::ostringstream os;
        os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]";
        Log::Fatal(os.str().c_str(), callername, i);
    };
    for (int i = 1; i < ny; i += 2)
    {
        if (y[i - 1] < y[i])
        {
            // y[i-1] is the pair minimum, y[i] the maximum.
            if (y[i - 1] < ymin)
            {
                fatal_msg(i - 1);
            }
            else if (y[i] > ymax)
            {
                fatal_msg(i);
            }
        }
        else
        {
            // y[i-1] is the pair maximum, y[i] the minimum.
            if (y[i - 1] > ymax)
            {
                fatal_msg(i - 1);
            }
            else if (y[i] < ymin)
            {
                fatal_msg(i);
            }
        }
    }
    if (ny & 1)
    { // odd: the last element had no partner, check it directly
        if (y[ny - 1] < ymin || y[ny - 1] > ymax)
        {
            fatal_msg(ny - 1);
        }
    }
}
// One-pass scan over array w with nw elements: find min, max and sum of
// elements; this is useful for checking weight requirements.
// Elements are consumed in pairs so each pair costs three comparisons
// instead of four. Any of mi/ma/su may be nullptr to skip that output.
// NOTE(review): assumes nw >= 1 — w[0] (and w[1] when nw is even) is
// read unconditionally; confirm callers never pass nw == 0.
template <typename T1, typename T2>
inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su)
{
    T1 minw;
    T1 maxw;
    T1 sumw;
    int i;
    // Seed the running min/max/sum so the main loop always starts on an
    // even pair boundary (pairs are (w[i-1], w[i]) with odd i).
    if (nw & 1)
    { // odd
        minw = w[0];
        maxw = w[0];
        sumw = w[0];
        i = 2;
    }
    else
    { // even
        if (w[0] < w[1])
        {
            minw = w[0];
            maxw = w[1];
        }
        else
        {
            minw = w[1];
            maxw = w[0];
        }
        sumw = w[0] + w[1];
        i = 3;
    }
    for (; i < nw; i += 2)
    {
        // Order the pair once, then update min with its smaller element
        // and max with its larger one.
        if (w[i - 1] < w[i])
        {
            minw = std::min(minw, w[i - 1]);
            maxw = std::max(maxw, w[i]);
        }
        else
        {
            minw = std::min(minw, w[i]);
            maxw = std::max(maxw, w[i - 1]);
        }
        sumw += w[i - 1] + w[i];
    }
    if (mi != nullptr)
    {
        *mi = minw;
    }
    if (ma != nullptr)
    {
        *ma = maxw;
    }
    if (su != nullptr)
    {
        *su = static_cast<T2>(sumw);
    }
}
// Allocates a zeroed bitset able to hold n bits: one uint32_t word per
// 32 bits, rounded up.
inline static std::vector<uint32_t, mi_stl_allocator<uint32_t>> EmptyBitset(int n)
{
    int words = n / 32;
    if (n % 32 != 0)
    {
        ++words;
    }
    return std::vector<uint32_t, mi_stl_allocator<uint32_t>>(words);
}
// Sets bit `val` in the bitset, growing it with zeroed words if needed.
//
// Bug fix: `1 << i2` shifts a signed int; for i2 == 31 this shifts a one
// bit into the sign bit, which is undefined behavior. The mask is now
// built from an unsigned literal.
template <typename T>
inline static void InsertBitset(std::vector<uint32_t, mi_stl_allocator<uint32_t>> *vec, const T val)
{
    auto &ref_v = *vec;
    int i1 = val / 32;  // word index
    int i2 = val % 32;  // bit index within the word
    if (static_cast<int>(vec->size()) < i1 + 1)
    {
        vec->resize(i1 + 1, 0);
    }
    ref_v[i1] |= (1U << i2);
}
// Builds a bitset with bits vals[0..n) set; the bitset is grown to fit
// the largest value inserted.
//
// Bug fix: `1 << i2` with i2 == 31 left-shifts a one bit into the sign
// bit of a signed int (undefined behavior); use an unsigned literal.
template <typename T>
inline static std::vector<uint32_t, mi_stl_allocator<uint32_t>> ConstructBitset(const T *vals, int n)
{
    std::vector<uint32_t, mi_stl_allocator<uint32_t>> ret;
    for (int i = 0; i < n; ++i)
    {
        int i1 = vals[i] / 32;
        int i2 = vals[i] % 32;
        if (static_cast<int>(ret.size()) < i1 + 1)
        {
            ret.resize(i1 + 1, 0);
        }
        ret[i1] |= (1U << i2);
    }
    return ret;
}
// Tests bit `pos` in a bitset of n uint32_t words; positions beyond the
// stored words report false.
template <typename T>
inline static bool FindInBitset(const uint32_t *bits, int n, T pos)
{
    const int word = pos / 32;
    if (word >= n)
    {
        return false;
    }
    const int bit = pos % 32;
    return ((bits[word] >> bit) & 1) != 0;
}
// Returns true when b does not exceed a by more than one representable
// double step (one ULP above a).
inline static bool CheckDoubleEqualOrdered(double a, double b)
{
    return b <= std::nextafter(a, INFINITY);
}
// Smallest double strictly greater than a (one ULP above).
inline static double GetDoubleUpperBound(double a)
{
    return std::nextafter(a, INFINITY);
}
// Length of the first line of str: number of characters before the
// first '\n', '\r', or terminating NUL.
inline static size_t GetLine(const char *str)
{
    const char *cursor = str;
    while (*cursor != '\0' && *cursor != '\n' && *cursor != '\r')
    {
        ++cursor;
    }
    return cursor - str;
}
// Advances past a single line terminator at str: "\r", "\n", or "\r\n".
// (A "\n\r" sequence counts as "\n" followed by content.)
inline static const char *SkipNewLine(const char *str)
{
    const char *p = str;
    if (*p == '\r')
    {
        ++p;
    }
    if (*p == '\n')
    {
        ++p;
    }
    return p;
}
// Sign of x: -1 for negative, 0 for zero, +1 for positive (branchless).
template <typename T>
static int Sign(T x)
{
    return (x > T(0)) - (x < T(0));
}
// Natural logarithm that maps non-positive inputs to -infinity instead
// of raising a domain error.
template <typename T>
static T SafeLog(T x)
{
    return x > 0 ? std::log(x) : -INFINITY;
}
// Returns false when s contains any character with structural meaning
// in JSON ( " , : [ ] { } ), true otherwise.
inline bool CheckAllowedJSON(const std::string &s)
{
    for (auto c : s)
    {
        switch (static_cast<unsigned char>(c))
        {
        case 34:   // "
        case 44:   // ,
        case 58:   // :
        case 91:   // [
        case 93:   // ]
        case 123:  // {
        case 125:  // }
            return false;
        default:
            break;
        }
    }
    return true;
}
// Rounds x to the nearest integer by adding one half and truncating.
// NOTE(review): truncation is toward zero, so negative inputs are not
// rounded half-away-from-zero — confirm callers only pass x >= 0.
inline int RoundInt(double x)
{
    const double shifted = x + 0.5f;
    return static_cast<int>(shifted);
}
// Minimal C++03-style allocator that hands out N-byte aligned storage
// via _mm_malloc/_mm_free (default 32-byte alignment, suitable for AVX).
// Stateless: all instances compare equal and are interchangeable.
// NOTE(review): "adress" is misspelled but is part of the public
// interface — renaming would break callers, so it is kept as-is.
template <typename T, std::size_t N = 32>
class AlignmentAllocator
{
public:
    typedef T value_type;
    typedef std::size_t size_type;
    typedef std::ptrdiff_t difference_type;
    typedef T *pointer;
    typedef const T *const_pointer;
    typedef T &reference;
    typedef const T &const_reference;
    inline AlignmentAllocator() throw() {}
    // Rebinding copy: allocator is stateless, so nothing to copy.
    template <typename T2>
    inline AlignmentAllocator(const AlignmentAllocator<T2, N> &) throw() {}
    inline ~AlignmentAllocator() throw() {}
    inline pointer adress(reference r)
    {
        return &r;
    }
    inline const_pointer adress(const_reference r) const
    {
        return &r;
    }
    // Allocates raw storage for n objects aligned to N bytes.
    // NOTE(review): n * sizeof(value_type) is not checked for overflow,
    // and a failed _mm_malloc returns NULL rather than throwing.
    inline pointer allocate(size_type n)
    {
        return (pointer)_mm_malloc(n * sizeof(value_type), N);
    }
    inline void deallocate(pointer p, size_type)
    {
        _mm_free(p);
    }
    // Placement-constructs / destroys one object (pre-C++17 protocol).
    inline void construct(pointer p, const value_type &wert)
    {
        new (p) value_type(wert);
    }
    inline void destroy(pointer p)
    {
        p->~value_type();
    }
    inline size_type max_size() const throw()
    {
        return size_type(-1) / sizeof(value_type);
    }
    template <typename T2>
    struct rebind
    {
        typedef AlignmentAllocator<T2, N> other;
    };
    bool operator!=(const AlignmentAllocator<T, N> &other) const
    {
        return !(*this == other);
    }
    // Returns true if and only if storage allocated from *this
    // can be deallocated from other, and vice versa.
    // Always returns true for stateless allocators.
    bool operator==(const AlignmentAllocator<T, N> &) const
    {
        return true;
    }
};
// Per-thread named stopwatch. Compiled to no-ops unless TIMETAG is
// defined; with TIMETAG, each OpenMP thread accumulates elapsed wall
// time per name, and totals across threads are logged at destruction.
class Timer
{
public:
    Timer()
    {
#ifdef TIMETAG
        // One start-time map and one stats map per OpenMP thread, so
        // Start/Stop need no locking.
        int num_threads = OMP_NUM_THREADS();
        start_time_.resize(num_threads);
        stats_.resize(num_threads);
#endif // TIMETAG
    }
    ~Timer() { Print(); }
#ifdef TIMETAG
    // Records the current time for `name` on the calling thread.
    void Start(const std::string &name)
    {
        auto tid = omp_get_thread_num();
        start_time_[tid][name] = std::chrono::steady_clock::now();
    }
    // Adds the elapsed time since the matching Start to this thread's
    // running total for `name`.
    void Stop(const std::string &name)
    {
        auto cur_time = std::chrono::steady_clock::now();
        auto tid = omp_get_thread_num();
        if (stats_[tid].find(name) == stats_[tid].end())
        {
            stats_[tid][name] = std::chrono::duration<double, std::milli>(0);
        }
        stats_[tid][name] += cur_time - start_time_[tid][name];
    }
#else
    void Start(const std::string &)
    {
    }
    void Stop(const std::string &) {}
#endif // TIMETAG
    // Sums the per-thread totals and logs them sorted by name.
    void Print() const
    {
#ifdef TIMETAG
        std::unordered_map<std::string, std::chrono::duration<double, std::milli>>
            stats(stats_[0].begin(), stats_[0].end());
        for (size_t i = 1; i < stats_.size(); ++i)
        {
            for (auto it = stats_[i].begin(); it != stats_[i].end(); ++it)
            {
                if (stats.find(it->first) == stats.end())
                {
                    stats[it->first] = it->second;
                }
                else
                {
                    stats[it->first] += it->second;
                }
            }
        }
        // std::map orders the output alphabetically by timer name.
        std::map<std::string, std::chrono::duration<double, std::milli>> ordered(
            stats.begin(), stats.end());
        for (auto it = ordered.begin(); it != ordered.end(); ++it)
        {
            Log::Info("%s costs:\t %f", it->first.c_str(), it->second * 1e-3);
        }
#endif // TIMETAG
    }
#ifdef TIMETAG
    std::vector<
        std::unordered_map<std::string, std::chrono::steady_clock::time_point>>
        start_time_;
    std::vector<std::unordered_map<std::string,
                                   std::chrono::duration<double, std::milli>>>
        stats_;
#endif // TIMETAG
};
// Note: this class is not thread-safe, don't use it inside omp blocks
// RAII scope timer: construction starts the named timer, destruction
// stops it. Compiles to an empty object when TIMETAG is not defined.
class FunctionTimer
{
public:
#ifdef TIMETAG
    FunctionTimer(const std::string &name, Timer &timer) : timer_(timer)
    {
        timer.Start(name);
        name_ = name;
    }
    ~FunctionTimer() { timer_.Stop(name_); }
private:
    std::string name_;
    Timer &timer_;
#else
    FunctionTimer(const std::string &, Timer &)
    {
    }
#endif // TIMETAG
};
} // namespace Common
extern Common::Timer global_timer;
} // namespace LightGBM
#endif // LightGBM_UTILS_COMMON_FUN_H_
|
GB_unop__carg_fp32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__carg_fp32_fc32
// op(A') function: GB_unop_tran__carg_fp32_fc32
// C type: float
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = (aij)
// unaryop: cij = cargf (aij)
// GB_ATYPE: type of A's entries (kernel input)
#define GB_ATYPE \
    GxB_FC32_t
// GB_CTYPE: type of C's entries (kernel output)
#define GB_CTYPE \
    float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
    z = cargf (x) ;
// casting (here the "cast" keeps the complex input type; carg maps it to float)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = (aij) ; \
    Cx [pC] = cargf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_CARG || GxB_NO_FP32 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Computes Cx [p] = cargf (Ax [p]) — the phase angle of each single-
// precision complex entry — over anz entries, in parallel.
GrB_Info GB_unop_apply__carg_fp32_fc32
(
    float *Cx,               // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = (aij) ;
            Cx [p] = cargf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = (aij) ;
            Cx [p] = cargf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The actual transpose loop lives in the shared template
// GB_unop_transpose.c, which is specialized here via the GB_* macros
// defined above.
GrB_Info GB_unop_tran__carg_fp32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
fluid_solver.h | /*
* File: edgebased_levelset.h
* Author: rrossi
*
* Created on July 31, 2009, 10:51 AM
*/
/*
==============================================================================
KratosPFEMApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
//
// Project Name: Kratos
// Last Modified by: $Author: antonia $
// Date: $Date: 2009-01-14 16:24:38 $
// Revision: $Revision: 1.11 $
//
//
#if !defined(KRATOS_EDGEBASED_FLUID_SOLVER_H_INCLUDED)
#define KRATOS_EDGEBASED_FLUID_SOLVER_H_INCLUDED
//#define SPLIT_OSS
#define SYMM_PRESS
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// #include <omp.h>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "includes/cfd_variables.h"
//#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "incompressible_fluid_application.h"
namespace Kratos
{
template<unsigned int TDim, class MatrixContainer, class TSparseSpace, class TLinearSolver>
class FluidSolver
{
public:
//name for the self defined structure
typedef EdgesStructureType<TDim> CSR_Tuple;
typedef vector<CSR_Tuple> EdgesVectorType;
//name for row start and column index vectors
typedef vector<unsigned int> IndicesVectorType;
//defining matrix type for test calculations
typedef vector< array_1d<double, TDim> > CalcVectorType;
//defining type for local storage of nodal values
typedef vector<double> ValuesVectorType;
//defining types for matrix operations
typedef typename TSparseSpace::MatrixType TSystemMatrixType;
typedef typename TSparseSpace::VectorType TSystemVectorType;
typedef std::size_t SizeType;
//constructor and destructor
// Constructor: stores references to the edge-based matrix container and
// the model part, and copies the physical parameters (viscosity,
// density, body force) plus the stabilization tuning factors.
// NOTE(review): mdelta_t_avg and max_dt receive placeholder values here;
// they are recomputed in ComputeTimeStep before being used.
FluidSolver(MatrixContainer& mr_matrix_container,
            ModelPart& mr_model_part,
            const double viscosity,
            const double density,
            const Vector body_force,
            bool use_mass_correction,
            double edge_detection_angle,
            double stabdt_pressure_factor,
            double stabdt_convection_factor,
            double tau2_factor,
            bool assume_constant_dp
           )
    : mr_matrix_container(mr_matrix_container),
      mr_model_part(mr_model_part),
      mstabdt_pressure_factor(stabdt_pressure_factor),
      mstabdt_convection_factor(stabdt_convection_factor),
      medge_detection_angle(edge_detection_angle),
      mtau2_factor(tau2_factor),
      massume_constant_dp(assume_constant_dp)
{
    mViscosity = viscosity;
    noalias(mBodyForce) = body_force;
    mRho = density;
    mdelta_t_avg = 1000.0;
    max_dt = 1.0;
    muse_mass_correction = use_mass_correction;
    mWallLawIsActive = false;
    // for (unsigned int i = 0; i < TDim; i++) mBodyForce[i] = 0;
    // mBodyForce[1] = -9.81;
    //
    // mRho = 1000.0;
};

// Destructor: all members manage their own storage.
~FluidSolver()
{
};
//***********************************
// Initializes the fluid solver: sizes all nodal/edge work vectors, pulls
// the initial velocity/pressure fields from the Kratos database,
// classifies boundary nodes (fixed velocity, pressure outlet, slip),
// builds the CSR structure of the pressure matrix mL, and seeds the
// pressure projection with the body-force value.
void Initialize(
)
{
    KRATOS_TRY

    //get number of nodes
    unsigned int n_nodes = mr_model_part.Nodes().size();
    unsigned int n_edges = mr_matrix_container.GetNumberEdges();
    //size data vectors
    mWork.resize(n_nodes);
    mvel_n.resize(n_nodes);
    mvel_n1.resize(n_nodes);
    mPn.resize(n_nodes);
    mPn1.resize(n_nodes);
    mHmin.resize(n_nodes);
    mHavg.resize(n_nodes);
    mNodalFlag.resize(n_nodes);
    mTauPressure.resize(n_nodes);
    mTauConvection.resize(n_nodes);
    mTau2.resize(n_nodes);
    mPi.resize(n_nodes);
    mXi.resize(n_nodes);
    mx.resize(n_nodes);
    mEdgeDimensions.resize(n_edges);
    //convection variables
    mBeta.resize(n_nodes);
    mdiv_error.resize(n_nodes);
    mr_matrix_container.SetToZero(mdiv_error);
    // ValuesVectorType external_pressure;
    // external_pressure.resize(n_nodes);
    //read velocity and pressure data from Kratos
    mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, mr_model_part.Nodes());
    mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, mr_model_part.Nodes());
    mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, mr_model_part.Nodes());
    mr_matrix_container.FillCoordinatesFromDatabase(mx, mr_model_part.Nodes());
    //set flag for first time step
    mFirstStep = true;
    //loop to categorize boundary nodes
    std::vector< unsigned int> tempFixedVelocities;
    std::vector< array_1d<double,TDim> > tempFixedVelocitiesValues;
    std::vector< unsigned int> tempPressureOutletList;
    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
            inode != mr_model_part.NodesEnd();
            inode++)
    {
        int index = inode->FastGetSolutionStepValue(AUX_INDEX);
        if (inode->IsFixed(VELOCITY_X)) //note that the variables can be either all fixed or no one fixed
        {
            if (inode->IsFixed(VELOCITY_Y) == false || inode->IsFixed(VELOCITY_Z) == false)
            {
                std::cout << "error found on the fixity of node " << inode->Id() << std::endl;
                KRATOS_THROW_ERROR(std::logic_error, "velocities can be either all fixed or none fixed", "")
            }
            tempFixedVelocities.push_back(index);
            tempFixedVelocitiesValues.push_back(mvel_n1[index]);
        }
        if (inode->IsFixed(PRESSURE))
        {
            tempPressureOutletList.push_back(index);
            // mPressureOutlet.push_back(external_pressure[index]);
        }
    }
    mFixedVelocities.resize(tempFixedVelocities.size(),false);
    mFixedVelocitiesValues.resize(tempFixedVelocitiesValues.size(),false);
    mPressureOutletList.resize(tempPressureOutletList.size(),false);
    // copy the temporary std::vectors into the solver's containers
    #pragma omp parallel for
    for(int i=0; i< static_cast<int>(tempFixedVelocities.size()); i++)
    {
        mFixedVelocities[i] = tempFixedVelocities[i];
        mFixedVelocitiesValues[i] = tempFixedVelocitiesValues[i];
    }
    #pragma omp parallel for
    for(int i=0; i<static_cast<int>(tempPressureOutletList.size()); i++)
    {
        mPressureOutletList[i] = tempPressureOutletList[i];
    }
    //compute slip normals and fill SlipList
    CalculateNormals(mr_model_part.Conditions());
    mr_matrix_container.WriteVectorToDatabase(NORMAL, mSlipNormal, mr_model_part.Nodes());
    if(TDim == 3)
        DetectEdges3D(mr_model_part.Conditions());
    //determine number of edges and entries
    // each edge contributes two off-diagonal entries; each node one diagonal
    unsigned int n_nonzero_entries = 2 * n_edges + n_nodes;
    //allocate memory for variables
    mL.resize(n_nodes, n_nodes, n_nonzero_entries);
    //loop over all nodes
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        //flag for considering diagonal matrix elements
        bool flag = 0;
        //loop over all neighbours
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
        {
            //get global index of neighbouring node j
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
            //define matrix structure row by row (the order does matter!)
            if ((j_neighbour > i_node) && (flag == 0))
            {
                //add diagonal/nodal contribution
                mL.push_back(i_node, i_node, 0.0);
                flag = 1;
            }
            //add non-diagonal/edge contribution
            mL.push_back(i_node, j_neighbour, 0.0);
        }
        //if diagonal element is the last non-zero element of the row
        if (flag == 0)
            mL.push_back(i_node, i_node, 0.0);
    }
    //compute minimum length of the surrounding edges
    CalculateEdgeLengths(mr_model_part.Nodes());
    //set the pressure projection to the body force value
    array_1d<double,3> temp = mRho * mBodyForce;
    for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
            inode != mr_model_part.NodesEnd();
            inode++)
        inode->FastGetSolutionStepValue(PRESS_PROJ) = temp;

    KRATOS_CATCH("")
}
//***************************************
// Returns the minimum over all nodes of the average surrounding edge
// length mHavg (filled by CalculateEdgeLengths in Initialize).
//
// Fixes: the banner comment was a copy-paste of the time-step function's
// description, and the loop bound was stored in a double and compared
// against an unsigned index on every iteration — it is now integral.
double ComputeMinimum_Havg()
{
    KRATOS_TRY

    double hmin_global = 1e10;
    //loop over all nodes
    const unsigned int n_nodes = mvel_n1.size();
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        const double havg_i = mHavg[i_node];
        if (havg_i < hmin_global)
            hmin_global = havg_i;
    }
    return hmin_global;

    KRATOS_CATCH("")
}
//***************************************
// Computes the admissible time step from the CFL condition, combining
// convective (2*|v|/h) and viscous (4*nu/h^2) limits per node. Also
// updates mdelta_t_avg, the analogous (un-scaled) limit based on the
// average edge length, used later for the stabilization parameters.
double ComputeTimeStep(const double CFLNumber, const double MaxDt)
{
    KRATOS_TRY

    //save the maximum time step
    max_dt = MaxDt;
    //local variable for time step size
    double delta_t = 1e10;
    mdelta_t_avg = 1e10;
    //getting value of current velocity and of viscosity
    mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
    //*******************
    //loop over all nodes
    // NOTE(review): the loop bound is stored in a double and compared
    // against an unsigned index — works, but an integral type would be
    // cleaner and avoids an int->double conversion per iteration.
    double n_nodes = mvel_n1.size();
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        const array_1d<double, TDim>& v_i = mvel_n1[i_node];
        const double havg_i = mHavg[i_node];
        const double hmin_i = mHmin[i_node];
        double vel_norm = norm_2(v_i);
        //use CFL condition to compute time step size
        double delta_t_i = CFLNumber * 1.0 / (2.0 * vel_norm /hmin_i + 4.0 * mViscosity / (hmin_i * hmin_i) );
        double delta_t_i_avg = 1.0 / (2.0 * vel_norm /havg_i + 4.0 * mViscosity / (havg_i * havg_i) );
        //considering the most restrictive case of neighbor's velocities with similar direction but opposite sense.
        //loop over all neighbours
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
        {
            //get global index of neighbouring node j
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
            const array_1d<double, TDim>& v_j = mvel_n1[j_neighbour];
            // |v_i - v_j|: relative velocity across the edge
            double v_diff_norm = 0.0;
            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            {
                double temp = v_i[l_comp] - v_j[l_comp];
                v_diff_norm += temp*temp;
            }
            v_diff_norm = sqrt(v_diff_norm);
            double delta_t_j = CFLNumber * 1.0 / (2.0 * v_diff_norm /hmin_i + 4.0 * mViscosity / (hmin_i * hmin_i));
            if (delta_t_j < delta_t_i)
                delta_t_i = delta_t_j;
        }
        //choose the overall minimum of delta_t_i
        if (delta_t_i < delta_t)
            delta_t = delta_t_i;
        if(delta_t_i_avg < mdelta_t_avg)
            mdelta_t_avg = delta_t_i_avg;
    }
    //*******************
    //perform MPI syncronization of the dt (minimum should be kept)
    return delta_t;

    KRATOS_CATCH("")
}
// Refreshes the cached values of the prescribed velocities
// (mFixedVelocitiesValues) from the current VELOCITY field, so that a
// boundary condition changed in the database is picked up by
// ApplyVelocityBC.
void UpdateFixedVelocityValues()
{
    KRATOS_TRY

    //read velocity and pressure data from Kratos
    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
    int fixed_size = mFixedVelocities.size();
    #pragma omp parallel for firstprivate(fixed_size)
    for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++)
    {
        unsigned int i_node = mFixedVelocities[i_velocity];
        array_1d<double, TDim>& u_i_fix = mFixedVelocitiesValues[i_velocity];
        const array_1d<double, TDim>& u_i = mvel_n1[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            u_i_fix[comp] = u_i[comp];
    }
    KRATOS_CATCH("");
}
//**********************************************************************************
// Fractional step 1: computes the fractional (intermediate) momentum by
// integrating the momentum equation with a classical 4-stage Runge-Kutta
// scheme (weights dt/6, dt/3, dt/3, dt/6 accumulated into mWork), with
// the stabilization parameters (tau) and the convective projection (Pi)
// recomputed once at the start of the step.
void SolveStep1()
{
    KRATOS_TRY

    //PREREQUISITES
    //variables for node based data handling
    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    int n_nodes = rNodes.size();
    //storage of nodal values in local variables
    CalcVectorType rhs;
    rhs.resize(n_nodes);
    //read velocity and pressure data from Kratos
    mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
    mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, rNodes);
    mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, rNodes);
    mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, rNodes);
    //read time step size from Kratos
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];
    //compute intrinsic time
    // double time_inv = 1.0 / delta_t;
    double time_inv_avg = 1.0/mdelta_t_avg;
    double stabdt_pressure_factor = mstabdt_pressure_factor;
    double stabdt_convection_factor = mstabdt_convection_factor;
    double tau2_factor = mtau2_factor;
    KRATOS_WATCH(stabdt_pressure_factor);
    // per-node stabilization parameters: tau for pressure, tau for
    // convection, and the second-order tau2
    #pragma omp parallel for firstprivate(time_inv_avg,stabdt_pressure_factor,stabdt_convection_factor,tau2_factor)
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        double& h_avg_i = mHavg[i_node];
        array_1d<double, TDim>& a_i = mvel_n1[i_node];
        const double nu_i = mViscosity;
        double vel_norm = norm_2(a_i);
        double tau = 1.0 / (2.0 * vel_norm / h_avg_i + stabdt_pressure_factor*time_inv_avg + (4.0*nu_i) / (h_avg_i * h_avg_i) );
        double tau_conv = 1.0 / (2.0 * vel_norm / h_avg_i + stabdt_convection_factor*time_inv_avg + (4.0*nu_i) / (h_avg_i * h_avg_i) );
        mTauPressure[i_node] = tau;
        mTauConvection[i_node] = tau_conv;
        mTau2[i_node] = (mViscosity + h_avg_i*vel_norm*0.5)*tau2_factor;
    }
    //calculating the convective projection
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        array_1d<double, TDim>& pi_i = mPi[i_node];
        //setting to zero
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            pi_i[l_comp] = 0.0;
        array_1d<double, TDim> a_i = mvel_n1[i_node];
        const array_1d<double, TDim>& U_i = mvel_n1[i_node];
        // accumulate edge-based convective contributions, then scale by
        // the inverted lumped mass to obtain the nodal projection
        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
        {
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
            array_1d<double, TDim> a_j = mvel_n1[j_neighbour];
            const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
            CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
            edge_ij.Add_ConvectiveContribution(pi_i, a_i, U_i, a_j, U_j);
        }
        const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            pi_i[l_comp] *= m_inv;
    }
    mr_matrix_container.AssignVectorToVector(mvel_n, mWork); //mWork = mvel_n
    //first step of Runge Kutta
    mr_matrix_container.AssignVectorToVector(mvel_n, mvel_n1); //mvel_n1 = mvel_n
    mr_matrix_container.SetToZero(rhs);
    CalculateRHS(mvel_n1, mPn, mvel_n1, rhs);
    mr_matrix_container.Add_Minv_value(mWork, mWork, delta_t / 6.0, mr_matrix_container.GetInvertedMass(), rhs);
    mr_matrix_container.Add_Minv_value(mvel_n1, mvel_n, 0.5 * delta_t, mr_matrix_container.GetInvertedMass(), rhs);
    ApplyVelocityBC(mvel_n1);
    // KRATOS_WATCH("end of first stage")
    // double vnorm2 = 0.0;
    // for( int i = 0; i< rNodes.size(); i++)
    // vnorm2 += pow(mvel_n1[i][0],2) + pow(mvel_n1[i][1],2) + pow(mvel_n1[i][2],2);
    // KRATOS_WATCH(sqrt(vnorm2));
    //second step
    mr_matrix_container.SetToZero(rhs);
    CalculateRHS(mvel_n1, mPn, mvel_n1, rhs);
    mr_matrix_container.Add_Minv_value(mWork, mWork, delta_t / 3.0, mr_matrix_container.GetInvertedMass(), rhs);
    mr_matrix_container.Add_Minv_value(mvel_n1, mvel_n, 0.5 * delta_t, mr_matrix_container.GetInvertedMass(), rhs);
    ApplyVelocityBC(mvel_n1);
    // KRATOS_WATCH("end of second stage")
    // vnorm2 = 0.0;
    // for( int i = 0; i< rNodes.size(); i++)
    // vnorm2 += pow(mvel_n1[i][0],2) + pow(mvel_n1[i][1],2) + pow(mvel_n1[i][2],2);
    // KRATOS_WATCH(sqrt(vnorm2));
    //third step
    mr_matrix_container.SetToZero(rhs);
    CalculateRHS(mvel_n1, mPn, mvel_n1, rhs);
    mr_matrix_container.Add_Minv_value(mWork, mWork, delta_t / 3.0, mr_matrix_container.GetInvertedMass(), rhs);
    mr_matrix_container.Add_Minv_value(mvel_n1, mvel_n, delta_t, mr_matrix_container.GetInvertedMass(), rhs);
    ApplyVelocityBC(mvel_n1);
    // KRATOS_WATCH("end of thir stage")
    // vnorm2 = 0.0;
    // for( int i = 0; i< rNodes.size(); i++)
    // vnorm2 += pow(mvel_n1[i][0],2) + pow(mvel_n1[i][1],2) + pow(mvel_n1[i][2],2);
    // KRATOS_WATCH(sqrt(vnorm2));
    //fourth step
    mr_matrix_container.SetToZero(rhs);
    CalculateRHS(mvel_n1, mPn, mvel_n1, rhs);
    mr_matrix_container.Add_Minv_value(mWork, mWork, delta_t / 6.0, mr_matrix_container.GetInvertedMass(), rhs);
    //compute right-hand side
    mr_matrix_container.AssignVectorToVector(mWork, mvel_n1);
    ApplyVelocityBC(mvel_n1);
    // KRATOS_WATCH("end of Step1")
    // vnorm2 = 0.0;
    // for( int i = 0; i< rNodes.size(); i++)
    // vnorm2 += pow(mvel_n1[i][0],2) + pow(mvel_n1[i][1],2) + pow(mvel_n1[i][2],2);
    // KRATOS_WATCH(sqrt(vnorm2));
    KRATOS_CATCH("")
}
//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
// Assembles the right-hand side of the fractional momentum equation,
// edge-by-edge over the CSR neighbour structure:
//   rhs_i = M_i * f_i  -  convection  -  grad(p)/rho  -  viscous  -  convection stabilization
//
// vel                 : nodal velocities used as the convected quantity U
// pressure            : nodal pressures
// convective_velocity : nodal velocities used as the convecting field a
// rhs                 : output — overwritten with the assembled nodal RHS
void CalculateRHS(
const CalcVectorType& vel,
const ValuesVectorType& pressure,
const CalcVectorType& convective_velocity,
CalcVectorType& rhs)
{
KRATOS_TRY
int n_nodes = vel.size();
//calculating the RHS
array_1d<double, TDim> stab_low;
array_1d<double, TDim> stab_high;
// constant kinematic viscosity is used on both ends of every edge
const double nu_i = mViscosity;
const double nu_j = mViscosity;
double inverse_rho = 1.0 / mRho;
#pragma omp parallel for private(stab_low,stab_high)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& f_i = mBodyForce;
// a_i is copied by value: the convecting velocity at node i
array_1d<double, TDim> a_i = convective_velocity[i_node];
const array_1d<double, TDim>& U_i = vel[i_node];
// mPi holds the convection projection computed beforehand (high-order stabilization)
const array_1d<double, TDim>& pi_i = mPi[i_node];
const double& p_i = pressure[i_node];
double edge_tau = mTauConvection[i_node];
//initializing with the external forces (e.g. gravity)
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = m_i * f_i[comp] ;
//convective term
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim> a_j = convective_velocity[j_neighbour];
const array_1d<double, TDim>& U_j = vel[j_neighbour];
const array_1d<double, TDim>& pi_j = mPi[j_neighbour];
const double& p_j = pressure[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
// convection, pressure gradient (scaled by 1/rho) and viscous edge contributions
edge_ij.Sub_ConvectiveContribution(rhs_i, a_i, U_i, a_j, U_j);
edge_ij.Sub_grad_p(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
edge_ij.Sub_ViscousContribution(rhs_i, U_i, nu_i, U_j, nu_j);
//add stabilization
edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, U_i, a_j, U_j);
edge_ij.CalculateConvectionStabilization_HIGH(stab_high, a_i, pi_i, a_j, pi_j);
// double beta = 1.0;
// double beta = beta_i;
// if(beta_j > beta)
// beta = beta_j;
// beta = 1.0;
// edge_ij.Sub_StabContribution(rhs_i, edge_tau*beta, 1.0, stab_low, stab_high);
// edge_ij.Sub_StabContribution(rhs_i, edge_tau, (1.0-beta), stab_low, stab_high);
edge_ij.Sub_StabContribution(rhs_i, edge_tau, 1.0, stab_low, stab_high);
//add tau2 term
// boost::numeric::ublas::bounded_matrix<double,TDim,TDim>& LL = edge_ij.LaplacianIJ;
// for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
// {
// double aaa = 0.0;
// for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
// aaa += LL(k_comp,m_comp) * (U_j[m_comp] - U_i[m_comp]);
// rhs_i[k_comp] -= tau2_i*aaa;
// }
}
}
//apply wall resistance
if(mWallLawIsActive == true)
ComputeWallResistance(vel,rhs);
// NOTE(review): writing mvel_n1 back to the VELOCITY database inside an
// RHS-assembly routine is a side effect unrelated to the computed rhs —
// looks like leftover debugging; confirm it is intentional.
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, rNodes);
KRATOS_CATCH("")
}
//*************************************************************************
//function to solve fluid equations - fractional step 2: calculate pressure
// Fractional step 2: assembles and solves the pressure (Laplacian) equation
//   L * dp = rhs
// with edge-based assembly, optional mass correction, penalization of
// pressure-outlet nodes, symmetric diagonal scaling of the system, and a
// final update of the pressure projection mXi for the next step.
//
// pLinearSolver : linear solver used for the scaled system.
void SolveStep2(typename TLinearSolver::Pointer pLinearSolver)
{
KRATOS_TRY
//PREREQUISITES
//allocate memory for variables
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//unknown and right-hand side vector
TSystemVectorType dp, rhs;
dp.resize(n_nodes);
rhs.resize(n_nodes);
array_1d<double, TDim> dU_i, dU_j, work_array;
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
#ifdef _OPENMP
// double time_inv = 0.0; //1.0/delta_t;
//read the pressure projection from the database
#endif
// gather nodal data (current/old pressure, pressure projection, current/old velocity)
mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, mr_model_part.Nodes());
mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, mr_model_part.Nodes());
mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, rNodes);
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, rNodes);
// assemble the Laplacian matrix mL and the right-hand side, row by row
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
rhs_i = 0.0;
const double& p_i = mPn1[i_node];
const double& p_old_i = mPn[i_node];
const array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
array_1d<double, TDim>& xi_i = mXi[i_node];
double l_ii = 0.0;
//loop over all neighbours
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& p_j = mPn1[j_neighbour];
const double& p_old_j = mPn[j_neighbour];
const array_1d<double, TDim>& U_j_curr = mvel_n1[j_neighbour];
const array_1d<double, TDim>& xi_j = mXi[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
#ifdef SYMM_PRESS
double edge_tau = 0.5 * (mTauPressure[i_node] + mTauPressure[j_neighbour]);
#else
double edge_tau = mTauPressure[i_node];
#endif
//compute laplacian operator
double sum_l_ikjk;
edge_ij.CalculateScalarLaplacian(sum_l_ikjk);
// sum_l_ikjk *= 2.0;
// two scalings of the Laplacian entry: dt-only (for the old pressure)
// and dt + stabilization tau (for the current pressure / matrix)
double sum_l_ikjk_onlydt = sum_l_ikjk * (2.0*delta_t);
sum_l_ikjk *= (2.0*delta_t + edge_tau);
//assemble right-hand side
//pressure contribution
rhs_i -= sum_l_ikjk * (p_j - p_i);
rhs_i += sum_l_ikjk_onlydt * (p_old_j - p_old_i);
//calculating the divergence of the fract vel
edge_ij.Sub_D_v(rhs_i, U_i_curr*mRho, U_j_curr * mRho);
//high order stabilizing term
double temp = 0.0;
edge_ij.Add_div_v(temp, xi_i, xi_j);
rhs_i += edge_tau * temp;
//assemble laplacian matrix
mL(i_node, j_neighbour) = sum_l_ikjk;
l_ii -= sum_l_ikjk;
}
mL(i_node, i_node) = l_ii;
}
// optional mass correction: subtract the divergence error of the previous step
if(muse_mass_correction == true)
{
std::cout << "****************************************" << std::endl;
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
rhs_i -= mdiv_error[i_node];
}
}
// //find the max diagonal term
// double max_diag = 0.0;
// for (int i_node = 0; i_node < n_nodes; i_node++) {
// double L_diag = mL(i_node, i_node);
// if (fabs(L_diag) > fabs(max_diag)) max_diag = L_diag;
// }
//respect pressure boundary conditions by penalization
double huge = 1e20;
for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
{
unsigned int i_node = mPressureOutletList[i_pressure];
mL(i_node, i_node) = huge;
rhs[i_node] = 0.0;
}
// for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++) {
// unsigned int i_node = mPressureOutletList[i_pressure];
// mL(i_node, i_node) = max_diag;
// rhs[i_node] = 0.0;
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// mL(i_node, j_neighbour) = 0.0;
// }
// }
//set starting vector for iterative solvers
for (int i_node = 0; i_node < n_nodes; i_node++)
dp[i_node] = 0.0;
// symmetric Jacobi-like scaling: scale rows and columns by 1/sqrt(|diag|)
// using direct access to the CSR arrays of the uBLAS matrix
//compute row scaling factors
TSystemVectorType scaling_factors(n_nodes);
double* Lvalues = mL.value_data().begin();
SizeType* Lrow_indices = mL.index1_data().begin();
SizeType* Lcol_indices = mL.index2_data().begin();
for (SizeType k = 0; k < mL.size1(); k++)
{
double t = 0.0;
SizeType col_begin = Lrow_indices[k];
SizeType col_end = Lrow_indices[k+1];
for (SizeType j=col_begin; j<col_end; j++)
if( Lcol_indices[j] == k)
{
t = fabs(Lvalues[j]);
}
// t += Lvalues[j]*Lvalues[j];
// t = sqrt(t);
// NOTE(review): if a row has no (or a zero) diagonal entry, t stays 0 and
// the scaling factor becomes infinite — assumes every row has a nonzero diagonal.
scaling_factors[k] = 1.0/sqrt(t);
}
for (SizeType k = 0; k < mL.size1(); k++)
{
SizeType col_begin = Lrow_indices[k];
SizeType col_end = Lrow_indices[k+1];
double k_factor = scaling_factors[k];
rhs[k] *= k_factor;
for (SizeType j=col_begin; j<col_end; j++)
{
Lvalues[j] *= scaling_factors[Lcol_indices[j]] * k_factor;
}
}
// double huge = 1e20;
// for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++) {
// unsigned int i_node = mPressureOutletList[i_pressure];
// mL(i_node, i_node) = 1.0;
// rhs[i_node] = 0.0;
// }
// KRATOS_WATCH(norm_2(rhs));
// KRATOS_WATCH(norm_frobenius(mL));
// solve the scaled system, then undo the scaling on the solution
pLinearSolver->Solve(mL, dp, rhs);
//apply inverse scaling
for (unsigned int k = 0; k < dp.size(); k++)
dp[k] *= scaling_factors[k];
KRATOS_WATCH(*pLinearSolver)
//KRATOS_WATCH(norm_2(dp));
//update pressure
for (int i_node = 0; i_node < n_nodes; i_node++)
mPn1[i_node] += dp[i_node];
//write pressure and density to Kratos
mr_matrix_container.WriteScalarToDatabase(PRESSURE, mPn1, rNodes);
//compute pressure proj for the next step
#pragma omp parallel for private(work_array)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& xi_i = mXi[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
xi_i[comp] = 0.0;
const double& p_i = mPn1[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& p_j = mPn1[j_neighbour];
//projection of pressure gradients
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_grad_p(xi_i, p_i, p_j);
}
// lumped-mass inverse turns the assembled gradient into a nodal projection
const double& m_inv = mr_matrix_container.GetInvertedMass()[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
xi_i[l_comp] *= m_inv;
}
mr_matrix_container.WriteVectorToDatabase(PRESS_PROJ, mXi, rNodes);
KRATOS_CATCH("")
}
//**********************************************************************************
//function to solve fluid equations - fractional step 3: correct fractional momentum
// Fractional step 3: corrects the fractional momentum with the pressure
// increment, u_{n+1} += dt * M^{-1} * G(dp)/rho, re-applies the velocity
// boundary conditions and (optionally) accumulates the divergence error
// used by the mass correction in SolveStep2.
void SolveStep3()
{
KRATOS_TRY
//get number of nodes
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//define work array
array_1d<double, TDim> correction;
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
// with a constant-dp assumption the full pressure increment is applied,
// otherwise only half of it
double factor = 0.5;
if(massume_constant_dp == true)
factor = 1.0;
//compute end of step momentum
double rho_inv = 1.0 / mRho;
#pragma omp parallel for private(correction) firstprivate(delta_t,rho_inv,factor)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
double delta_p_i = (mPn1[i_node] - mPn[i_node]) * rho_inv*factor;
const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
//setting to zero
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
correction[l_comp] = 0.0;
//compute edge contributions dt*M^(-1)Gp
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
double delta_p_j = (mPn1[j_neighbour] - mPn[j_neighbour]) * rho_inv*factor;
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Sub_grad_p(correction, delta_p_i, delta_p_j);
}
//compute prefactor
double coefficient = delta_t * m_inv;
//correct fractional momentum
for (unsigned int comp = 0; comp < TDim; comp++)
U_i_curr[comp] += coefficient * correction[comp];
}
ApplyVelocityBC(mvel_n1);
//write velocity of time step n+1 to Kratos
mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, rNodes);
//calculate the error on the divergence
if(muse_mass_correction == true)
{
#pragma omp parallel for private(correction) firstprivate(delta_t,rho_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& div_i_err = mdiv_error[i_node];
div_i_err = 0.0;
const array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
//compute edge contributions dt*M^(-1)Gp
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim>& U_j_curr = mvel_n1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
// accumulate div(rho*u) as the mass-conservation error at node i
edge_ij.Add_D_v(div_i_err, U_i_curr*mRho, U_j_curr * mRho);
}
}
}
// KRATOS_WATCH("end of step3")
// double vnorm2 = 0.0;
// for( int i = 0; i< rNodes.size(); i++)
// vnorm2 += pow(mvel_n1[i][0],2) + pow(mvel_n1[i][1],2) + pow(mvel_n1[i][2],2);
// KRATOS_WATCH(sqrt(vnorm2));
KRATOS_CATCH("")
}
//************************************
// Enforces velocity boundary conditions in-place on VelArray:
//  - corner-edge constraint (only the component along the stored edge
//    direction is kept) and zero velocity at corner nodes, both applied
//    only when the wall law is inactive;
//  - slip condition: the component normal to the wall is removed;
//  - fixed-velocity nodes: the prescribed values are imposed.
void ApplyVelocityBC(CalcVectorType& VelArray)
{
    KRATOS_TRY
    if (mWallLawIsActive == false)
    {
        // constrain velocity along the prescribed direction on edge nodes
        int n_edge_nodes = medge_nodes_direction.size();
        #pragma omp parallel for firstprivate(n_edge_nodes)
        for (int k = 0; k < n_edge_nodes; k++)
        {
            int node_index = medge_nodes[k];
            const array_1d<double, TDim>& dir = medge_nodes_direction[k];
            array_1d<double, TDim>& v = VelArray[node_index];
            double projected = 0.0;
            for (unsigned int d = 0; d < TDim; d++)
                projected += v[d] * dir[d];
            for (unsigned int d = 0; d < TDim; d++)
                v[d] = dir[d] * projected;
        }
        // corner nodes are fully constrained to zero velocity
        int n_corners = mcorner_nodes.size();
        for (int k = 0; k < n_corners; k++)
        {
            array_1d<double, TDim>& v = VelArray[mcorner_nodes[k]];
            for (unsigned int d = 0; d < TDim; d++)
                v[d] = 0.0;
        }
    }
    // slip condition: remove the velocity component normal to the boundary
    int n_slip = mSlipBoundaryList.size();
    #pragma omp parallel for firstprivate(n_slip)
    for (int k = 0; k < n_slip; k++)
    {
        unsigned int node_index = mSlipBoundaryList[k];
        array_1d<double, TDim>& v = VelArray[node_index];
        array_1d<double, TDim>& normal = mSlipNormal[node_index];
        double v_dot_n = 0.0;
        double n_dot_n = 0.0;
        for (unsigned int d = 0; d < TDim; d++)
        {
            v_dot_n += v[d] * normal[d];
            n_dot_n += normal[d] * normal[d];
        }
        v_dot_n /= n_dot_n;
        // tangential velocity = original minus its normal projection
        for (unsigned int d = 0; d < TDim; d++)
            v[d] -= v_dot_n * normal[d];
    }
    // fixed-velocity nodes: impose the stored prescribed values
    int n_fixed = mFixedVelocities.size();
    #pragma omp parallel for firstprivate(n_fixed)
    for (int k = 0; k < n_fixed; k++)
    {
        unsigned int node_index = mFixedVelocities[k];
        const array_1d<double, TDim>& v_prescribed = mFixedVelocitiesValues[k];
        array_1d<double, TDim>& v = VelArray[node_index];
        for (unsigned int d = 0; d < TDim; d++)
            v[d] = v_prescribed[d];
    }
    KRATOS_CATCH("")
}
//**************************************
//function to calculate the area normals
// Computes area normals for all boundary conditions (faces) and
// accumulates, for every node flagged IS_STRUCTURE, the nodal slip
// normal as the node_factor-weighted sum of its faces' normals.
// Also rebuilds mSlipBoundaryList with the indices of all slip nodes.
void CalculateNormals(ModelPart::ConditionsContainerType& rConditions)
{
KRATOS_TRY
//calculate area normals face-by-face
array_1d<double, 3 > area_normal;
//2D case
if (TDim == 2)
{
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
CalculateNormal2D(cond_it, area_normal);
}//3D case
else if (TDim == 3)
{
//help vectors for cross product
array_1d<double, 3 > v1;
array_1d<double, 3 > v2;
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
CalculateNormal3D(cond_it, area_normal, v1, v2);
}
//(re)initialize normals
unsigned int n_nodes = mNodalFlag.size();
mSlipNormal.resize(n_nodes);
std::vector<bool> is_slip(n_nodes);
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
noalias(mSlipNormal[i_node]) = ZeroVector(TDim);
is_slip[i_node] = false;
}
//loop over all faces
// each face distributes its normal equally among its TDim nodes
const double node_factor = 1.0 / TDim;
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
{
//get geometry data of the face
Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
//reference for area normal of the face
array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
//slip condition
if (cond_it->GetValue(IS_STRUCTURE))
for (unsigned int if_node = 0; if_node < TDim; if_node++)
{
// AUX_INDEX stores the node's position in the edge-based data structure
unsigned int i_node = static_cast<unsigned int> (face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX));
array_1d<double, TDim>& slip_normal = mSlipNormal[i_node];
is_slip[i_node] = true;
for (unsigned int comp = 0; comp < TDim; comp++)
{
slip_normal[comp] += node_factor * face_normal[comp];
}
}
}
//fill the list of slip nodes
std::vector< unsigned int> tempmSlipBoundaryList;
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
if (is_slip[i_node] == true)
tempmSlipBoundaryList.push_back(i_node);
}
mSlipBoundaryList.resize(tempmSlipBoundaryList.size(),false);
#pragma omp parallel for
for( int i=0; i<static_cast<int>(tempmSlipBoundaryList.size()); i++)
mSlipBoundaryList[i] = tempmSlipBoundaryList[i];
KRATOS_CATCH("")
}
//*******************************
//function to free dynamic memory
void Clear()
{
KRATOS_TRY
mWork.clear();
mvel_n.clear();
mvel_n1.clear();
mPn.clear();
mPn1.clear();
mHmin.clear();
mHavg.clear();
mSlipNormal.clear();
mNodalFlag.clear();
mFixedVelocities.clear();
mFixedVelocitiesValues.clear();
mPressureOutletList.clear();
// mPressureOutlet.clear();
mSlipBoundaryList.clear();
mL.clear();
mTauPressure.clear();
mTauConvection.clear();
mTau2.clear();
mBeta.clear();
mdiv_error.clear();
KRATOS_CATCH("")
}
// Enables the wall-law resistance term (used by ComputeWallResistance)
// and stores the wall distance parameter.
// NOTE(review): verify that the mY_wall member is declared with a
// floating-point type — a bool declaration would truncate Ywall to 0/1.
void ActivateWallResistance(double Ywall)
{
mWallLawIsActive = true;
mY_wall = Ywall;
}
// Computes the intrinsic (stabilization) time mTauPressure for every node
// from the local velocity norm, the average element size mHavg, the
// average time step and the viscosity:
//   tau = 1 / ( 2*|v|/h + factor/dt_avg + 4*nu/h^2 )
void ComputePressureStabilization()
{
KRATOS_TRY
//PREREQUISITES
//variables for node based data handling
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//storage of nodal values in local variables
CalcVectorType rhs;
rhs.resize(n_nodes);
//read velocity and pressure data from Kratos
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
//read time step size from Kratos
// ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
// double delta_t = CurrentProcessInfo[DELTA_TIME];
//compute intrinsic time
// double time_inv = 1.0 / delta_t;
double time_inv_avg = 1.0 / mdelta_t_avg;
double stabdt_pressure_factor = mstabdt_pressure_factor;
// NOTE(review): debug print executed on every call — consider removing
KRATOS_WATCH(stabdt_pressure_factor);
#pragma omp parallel for firstprivate(time_inv_avg,stabdt_pressure_factor)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& h_avg_i = mHavg[i_node];
array_1d<double, TDim>& a_i = mvel_n1[i_node];
const double nu_i = mViscosity;
double vel_norm = norm_2(a_i);
double tau = 1.0 / (2.0 * vel_norm / h_avg_i + stabdt_pressure_factor * time_inv_avg + (4.0 * nu_i) / (h_avg_i * h_avg_i));
mTauPressure[i_node] = tau;
}
KRATOS_CATCH("");
}
//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
// Explicit viscosity correction: assembles the viscous edge contributions
// into a temporary rhs and updates the velocity as
//   v <- v + dt * M^{-1} * rhs,
// then re-applies velocity BCs and writes the result back to the database.
void ViscosityCorrectionStep()
{
KRATOS_TRY
int n_nodes = mvel_n1.size();
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
CalcVectorType rhs;
rhs.resize(n_nodes);
//calculating the RHS
// double inverse_rho = 1.0 / mRho;
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
//initializing with the external forces (e.g. gravity)
// double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = 0.0 ;
// viscous term assembled edge-by-edge (only viscosity, no convection)
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Sub_ViscousContribution(rhs_i, U_i, mViscosity, U_j, mViscosity);
}
}
//correcting the velocity
mr_matrix_container.Add_Minv_value(mvel_n1, mvel_n1, delta_t, mr_matrix_container.GetInvertedMass(), rhs);
ApplyVelocityBC(mvel_n1);
mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, rNodes);
KRATOS_CATCH("")
}
// Computes the nodal viscous force (M^{-1}-scaled viscous residual) and
// stores it in the FORCE variable. At fixed-velocity nodes the value is
// overwritten with body force + pressure projection.
// Requires the FORCE variable to be in the solution step data.
void ComputeViscousForces()
{
KRATOS_TRY
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(FORCE) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add ----FORCE---- variable!!!!!! ERROR", "");
int n_nodes = mvel_n1.size();
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, rNodes);
// ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
// double delta_t = CurrentProcessInfo[DELTA_TIME];
CalcVectorType rhs;
rhs.resize(n_nodes);
//calculating the RHS
// double inverse_rho = 1.0 / mRho;
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
//initializing with the external forces (e.g. gravity)
// double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = 0.0 ;
// viscous contribution assembled edge-by-edge
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Sub_ViscousContribution(rhs_i, U_i, mViscosity, U_j, mViscosity);
}
// scale by the inverse lumped mass to obtain a nodal force value
const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
rhs_i[l_comp] *= m_inv;
}
// at fixed-velocity nodes report body force + pressure projection instead
int fixed_size = mFixedVelocities.size();
#pragma omp parallel for firstprivate(fixed_size)
for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++)
{
unsigned int i_node = mFixedVelocities[i_velocity];
array_1d<double, TDim>& rhs_i = rhs[i_node];
array_1d<double, TDim>& proj_i = mXi[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
rhs_i[l_comp] = mBodyForce[l_comp] + proj_i[l_comp];
}
mr_matrix_container.WriteVectorToDatabase(FORCE, rhs, rNodes);
KRATOS_CATCH("")
}
// Computes nodal reaction forces (momentum-equation residual including the
// inertia term) and writes them to the FORCE variable.
//
// exclude_convection_terms : when true the convection stabilization terms
//   are omitted from the residual; the two branches are otherwise identical.
// Requires the FORCE variable to be in the solution step data.
void ComputeReactions(bool exclude_convection_terms)
{
KRATOS_TRY
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(FORCE) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add ----FORCE---- variable!!!!!! ERROR", "");
int n_nodes = mvel_n1.size();
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
CalcVectorType rhs;
rhs.resize(n_nodes);
mr_matrix_container.SetToZero(rhs);
//read velocity and pressure data from Kratos
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, rNodes);
mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, rNodes);
//calculating the RHS
array_1d<double, TDim> stab_low;
array_1d<double, TDim> stab_high;
const double nu_i = mViscosity;
const double nu_j = mViscosity;
double inverse_rho = 1.0 / mRho;
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
double dt_inv = 1.0/delta_t;
// NOTE(review): the two branches below duplicate the assembly loop and
// differ only in the stabilization terms — candidates for merging.
if(exclude_convection_terms == true)
{
#pragma omp parallel for private(stab_low,stab_high)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& f_i = mBodyForce;
array_1d<double, TDim> a_i = mvel_n1[i_node];
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
//const array_1d<double, TDim>& pi_i = mPi[i_node];
const double& p_i = mPn1[i_node];
//double edge_tau = mTauConvection[i_node];
//initializing with the external forces (e.g. gravity)
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = m_i * f_i[comp] ;
//convective term
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim> a_j = mvel_n1[j_neighbour];
const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
//const array_1d<double, TDim>& pi_j = mPi[j_neighbour];
const double& p_j = mPn1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Sub_ConvectiveContribution(rhs_i, a_i, U_i, a_j, U_j);
edge_ij.Add_Gp(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
// edge_ij.Sub_grad_p(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
edge_ij.Sub_ViscousContribution(rhs_i, U_i, nu_i, U_j, nu_j);
//add stabilization
// edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, U_i, a_j, U_j);
// edge_ij.CalculateConvectionStabilization_HIGH(stab_high, a_i, pi_i, a_j, pi_j);
// edge_ij.Sub_StabContribution(rhs_i, edge_tau, 1.0, stab_low, stab_high);
}
}
}
else
{
#pragma omp parallel for private(stab_low,stab_high)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& f_i = mBodyForce;
array_1d<double, TDim> a_i = mvel_n1[i_node];
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
const array_1d<double, TDim>& pi_i = mPi[i_node];
const double& p_i = mPn1[i_node];
double edge_tau = mTauConvection[i_node];
//initializing with the external forces (e.g. gravity)
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = m_i * f_i[comp] ;
//convective term
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim> a_j = mvel_n1[j_neighbour];
const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
const array_1d<double, TDim>& pi_j = mPi[j_neighbour];
const double& p_j = mPn1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Sub_ConvectiveContribution(rhs_i, a_i, U_i, a_j, U_j);
edge_ij.Add_Gp(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
// edge_ij.Sub_grad_p(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
edge_ij.Sub_ViscousContribution(rhs_i, U_i, nu_i, U_j, nu_j);
//add stabilization
edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, U_i, a_j, U_j);
edge_ij.CalculateConvectionStabilization_HIGH(stab_high, a_i, pi_i, a_j, pi_j);
edge_ij.Sub_StabContribution(rhs_i, edge_tau, 1.0, stab_low, stab_high);
}
}
}
// subtract the inertia contribution M*(v_{n+1}-v_n)/dt from the residual
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& rhs_i = rhs[i_node];
array_1d<double, TDim>& v_i = mvel_n1[i_node];
array_1d<double, TDim>& vold_i = mvel_n[i_node];
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] -= m_i*dt_inv*(v_i[comp] - vold_i[comp]) ;
}
//apply wall resistance
if(mWallLawIsActive == true)
ComputeWallResistance(mvel_n1,rhs);
mr_matrix_container.WriteVectorToDatabase(FORCE, rhs, rNodes);
KRATOS_CATCH("");
}
private:
    // edge-based (CSR) data structure shared by all assembly routines
    MatrixContainer& mr_matrix_container;
    ModelPart& mr_model_part;
    // if true, SolveStep2 subtracts the divergence error accumulated in SolveStep3
    bool muse_mass_correction;
    //parameters controlling the wall law
    bool mWallLawIsActive;
    // wall distance used by the wall-resistance model (set by ActivateWallResistance).
    // FIX: was declared "bool", which truncated the user-supplied double to 0/1.
    double mY_wall;
    //parameters for controlling the usage of the delta time in the stabilization
    double mstabdt_pressure_factor;
    double mstabdt_convection_factor;
    double medge_detection_angle;
    double mtau2_factor;
    bool massume_constant_dp;
    //nodal values
    //velocity vector U at time steps n and n+1 (mWork is the Runge-Kutta accumulator)
    CalcVectorType mWork, mvel_n, mvel_n1, mx;
    //pressure vector p at time steps n and n+1
    ValuesVectorType mPn, mPn1;
    //minimum length of the edges surrounding each nodal point
    ValuesVectorType mHmin;
    // average element size associated with each node
    ValuesVectorType mHavg;
    CalcVectorType mEdgeDimensions;
    //area normal (nodal slip normals, built in CalculateNormals)
    CalcVectorType mSlipNormal;
    //projection terms: mPi = convection projection, mXi = pressure projection
    CalcVectorType mPi, mXi;
    //flag for first time step
    bool mFirstStep;
    //flag to differentiate interior and boundary nodes
    ValuesVectorType mNodalFlag;
    //lists of nodes with different types of boundary conditions
    IndicesVectorType mSlipBoundaryList, mPressureOutletList, mFixedVelocities;
    CalcVectorType mFixedVelocitiesValues;
    //				ValuesVectorType mPressureOutlet;
    //intrinsic time step size (stabilization taus)
    ValuesVectorType mTauPressure;
    ValuesVectorType mTauConvection;
    ValuesVectorType mTau2;
    // nodal divergence error used by the mass correction
    ValuesVectorType mdiv_error;
    //variables for resolving pressure equation
    //laplacian matrix
    TSystemMatrixType mL;
    //constant variables
    double mRho;
    double mViscosity;
    array_1d<double, TDim> mBodyForce;
    //variables for convection
    ValuesVectorType mBeta;
    //variables for edge BCs
    IndicesVectorType medge_nodes;
    CalcVectorType medge_nodes_direction;
    IndicesVectorType mcorner_nodes;
    // average time step size used by the stabilization
    double mdelta_t_avg;
    double max_dt;
//***********************************************************
//functions to calculate area normals for boundary conditions
// Computes the (non-normalized) area normal of a 2D line condition as the
// clockwise 90-degree rotation of its edge vector and stores it in the
// condition's NORMAL variable (also returned through area_normal).
void CalculateNormal2D(ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double, 3 > & area_normal)
{
    Geometry<Node < 3 > >& geom = (cond_it)->GetGeometry();
    const double dx = geom[1].X() - geom[0].X();
    const double dy = geom[1].Y() - geom[0].Y();
    // rotate the edge (dx, dy) by -90 degrees: normal = (dy, -dx, 0)
    area_normal[0] = dy;
    area_normal[1] = -dx;
    area_normal[2] = 0.00;
    noalias((cond_it)->GetValue(NORMAL)) = area_normal;
}
// Computes the area normal of a 3D triangular condition as -0.5 times the
// cross product of two edge vectors and stores it in the condition's
// NORMAL variable. v1 and v2 are caller-provided scratch vectors.
void CalculateNormal3D(ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double, 3 > & area_normal, array_1d<double, 3 > & v1, array_1d<double, 3 > & v2)
{
    Geometry<Node < 3 > >& geom = (cond_it)->GetGeometry();
    // edge vectors of the triangle, taken from node 0
    for (unsigned int comp = 0; comp < 3; comp++)
    {
        v1[comp] = geom[1].Coordinates()[comp] - geom[0].Coordinates()[comp];
        v2[comp] = geom[2].Coordinates()[comp] - geom[0].Coordinates()[comp];
    }
    MathUtils<double>::CrossProduct(area_normal, v1, v2);
    // scale: triangle area factor (0.5) with sign flipped to point outward
    area_normal *= -0.5;
    noalias((cond_it)->GetValue(NORMAL)) = area_normal;
}
//*********************************************************
//function to calculate minimum length of surrounding edges
// Gathers nodal coordinates, copies the minimum surrounding-edge length
// (mHmin) from the matrix container, computes the average element size
// mHavg from the lumped mass, and stores the edge vectors pos_i - pos_j
// in mEdgeDimensions for every CSR edge.
void CalculateEdgeLengths(ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
//get number of nodes
unsigned int n_nodes = rNodes.size();
//reserve memory for storage of nodal coordinates
std::vector< array_1d<double, 3 > > position;
position.resize(n_nodes);
//get position of all nodes
for (typename ModelPart::NodesContainerType::iterator node_it = rNodes.begin(); node_it != rNodes.end(); node_it++)
{
//get the global index of the node
unsigned int i_node = static_cast<unsigned int> (node_it->FastGetSolutionStepValue(AUX_INDEX));
//save its coordinates locally
noalias(position[i_node]) = node_it->Coordinates();
//initialize minimum edge length with relatively big values
// NOTE(review): this 1e10 initialization is overwritten for every node
// by the copy from GetHmin() below — it appears redundant.
mHmin[i_node] = 1e10;
}
ValuesVectorType& aaa = mr_matrix_container.GetHmin();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
mHmin[i_node] = aaa[i_node];
}
//take unstructured meshes into account
if (TDim == 2)
{
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
double& h_i = mHavg[i_node];
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
// double& rho_i = mRho[i_node];
// 2D: nodal area ~ m_i, so a characteristic length is sqrt(2*m_i)
h_i = sqrt(2.0 * m_i);
}
}
else if (TDim == 3)
{
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
double& h_i = mHavg[i_node];
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
// double& rho_i = mRho[i_node];
// 3D: nodal volume ~ m_i, so a characteristic length is (6*m_i)^(1/3)
h_i = pow(6.0 * m_i, 1.0 / 3.0);
}
}
//compute edge coordinates
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, 3 > & pos_i = position[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, 3 > & pos_j = position[j_neighbour];
array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index];
for (unsigned int comp = 0; comp < TDim; comp++)
l_k[comp] = pos_i[comp] - pos_j[comp];
}
}
KRATOS_CATCH("")
}
//**************************************
void CornerDectectionHelper(Geometry< Node < 3 > >& face_geometry,
                            const array_1d<double, 3 > & face_normal,
                            const double An,
                            const WeakPointerVector<Condition>& neighb,
                            const unsigned int i1,
                            const unsigned int i2,
                            const unsigned int neighb_index,
                            std::vector<unsigned int>& edge_nodes,
                            CalcVectorType& cornern_list
                            )
{
    // Classifies the edge (i1,i2) shared between this face and neighbour
    // `neighb_index` as a feature edge when the two face normals deviate by
    // more than 45 degrees; counts the hit on both end nodes (edge_nodes) and
    // accumulates the edge direction per node (cornern_list) so that nodes
    // with more than 2 feature edges can later be flagged as corners.
    //
    // Fix: the original used 3.1 as an approximation of pi, which silently
    // moved the advertised 45 deg threshold to ~44.4 deg.
    const double pi = 3.14159265358979323846;
    const double acceptable_angle = 45.0 / 180.0 * pi; //angles of less than 45 deg will be accepted
    const double acceptable_cos = cos(acceptable_angle);
    //process each shared edge once only: from the face whose node i1 has the smaller Id
    if (face_geometry[i1].Id() < face_geometry[i2].Id())
    {
        const array_1d<double, 3 > & neighb_normal = neighb[neighb_index].GetValue(NORMAL);
        double neighb_An = norm_2(neighb_normal);
        double cos_normal = 1.0 / (An * neighb_An) * inner_prod(face_normal, neighb_normal);
        //if the angle is too big between the two normals then the edge in the middle is a corner
        if (cos_normal < acceptable_cos)
        {
            //unit direction of the shared edge
            array_1d<double, TDim > edge;
            for (unsigned int i = 0; i < TDim; i++)
                edge[i] = face_geometry[i2].Coordinates()[i] - face_geometry[i1].Coordinates()[i];
            double temp = norm_2(edge);
            edge /= temp;
            int index1 = face_geometry[i1].FastGetSolutionStepValue(AUX_INDEX);
            int index2 = face_geometry[i2].FastGetSolutionStepValue(AUX_INDEX);
            //count feature edges incident on each end node
            edge_nodes[index1] += 1;
            edge_nodes[index2] += 1;
            //accumulate the direction with consistent orientation (flip if opposing)
            double sign1 = inner_prod(cornern_list[index1], edge);
            if (sign1 >= 0)
                cornern_list[index1] += edge;
            else
                cornern_list[index1] -= edge;
            double sign2 = inner_prod(cornern_list[index2], edge);
            if (sign2 >= 0)
                cornern_list[index2] += edge;
            else
                cornern_list[index2] -= edge;
        }
    }
}
//function to detect feature edges and corner nodes on the slip boundary (3D)
void DetectEdges3D(ModelPart::ConditionsContainerType& rConditions)
{
    KRATOS_TRY
    // Detects geometric features on the slip boundary: for every pair of
    // neighbouring slip faces whose normals deviate beyond the threshold in
    // CornerDectectionHelper, the shared edge is counted on both of its nodes.
    // Nodes touched by exactly 2 such edges become edge nodes (with an
    // averaged, normalized edge direction); nodes with more become corners.
    // Results are stored in medge_nodes, medge_nodes_direction, mcorner_nodes.
    //
    // Fix: temp_edge_nodes holds unsigned int counters; they were being
    // initialized with the double literal 0.0.
    array_1d<double, 3 > area_normal;
    //(re)initialize per-node counters and accumulated edge directions
    unsigned int n_nodes = mNodalFlag.size();
    std::vector<unsigned int> temp_edge_nodes(n_nodes);
    CalcVectorType temp_cornern_list(n_nodes);
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        temp_edge_nodes[i_node] = 0;
        noalias(temp_cornern_list[i_node]) = ZeroVector(TDim);
    }
    //loop over all faces
    for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
    {
        //get geometry data of the face
        Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
        //reference for area normal of the face
        const array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
        double An = norm_2(face_normal);
        unsigned int current_id = cond_it->Id();
        //slip condition
        if (cond_it->GetValue(IS_STRUCTURE) == 1.0) //this is a slip face --> now look for its neighbours
        {
            const WeakPointerVector<Condition>& neighb = cond_it->GetValue(NEIGHBOUR_CONDITIONS);
            //a neighbour slot pointing back to this condition means "no neighbour"
            //check for neighbour zero
            if (neighb[0].Id() != current_id) //check if the neighbour exists
                CornerDectectionHelper(face_geometry, face_normal, An, neighb, 1, 2, 0, temp_edge_nodes, temp_cornern_list);
            //check for neighbour one
            if (neighb[1].Id() != current_id) //check if the neighbour exists
                CornerDectectionHelper(face_geometry, face_normal, An, neighb, 2, 0, 1, temp_edge_nodes, temp_cornern_list);
            //check for neighbour two
            if (neighb[2].Id() != current_id) //check if the neighbour exists
                CornerDectectionHelper(face_geometry, face_normal, An, neighb, 0, 1, 2, temp_edge_nodes, temp_cornern_list);
        }
    }
    //fill the lists of edge nodes and corner nodes
    std::vector<unsigned int> tempmedge_nodes;
    std::vector< array_1d<double,TDim> > tempmedge_nodes_direction;
    std::vector<unsigned int> tempmcorner_nodes;
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        if (temp_edge_nodes[i_node] == 2) //node is a edge_node
        {
            tempmedge_nodes.push_back(i_node);
            array_1d<double, TDim>& node_edge = temp_cornern_list[i_node];
            node_edge /= norm_2(node_edge); //normalize the accumulated direction
            tempmedge_nodes_direction.push_back(node_edge);
        }
        else if (temp_edge_nodes[i_node] > 2) //several feature edges meet here
            tempmcorner_nodes.push_back(i_node);
    }
    //copy the temporaries into the member containers
    medge_nodes.resize(tempmedge_nodes.size(),false);
    medge_nodes_direction.resize(tempmedge_nodes_direction.size(),false);
    mcorner_nodes.resize(tempmcorner_nodes.size(),false);
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(tempmedge_nodes.size()); i++)
    {
        medge_nodes[i] = tempmedge_nodes[i];
        medge_nodes_direction[i] = tempmedge_nodes_direction[i];
    }
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(tempmcorner_nodes.size()); i++)
    {
        mcorner_nodes[i] = tempmcorner_nodes[i];
    }
    for (unsigned int i = 0; i < mcorner_nodes.size(); i++)
    {
        KRATOS_WATCH(mcorner_nodes[i]);
    }
    KRATOS_CATCH("")
}
void ComputeWallResistance(
const CalcVectorType& vel,
CalcVectorType& rhs
)
{
//parameters:
double k = 0.41;
double B = 5.1;
double density = mRho;
double mu = mViscosity;
double toll = 1e-6;
double ym = mY_wall; //0.0825877; //0.0093823
double y_plus_incercept = 10.9931899;
unsigned int itmax = 100;
if (mu == 0)
KRATOS_THROW_ERROR(std::logic_error, "it is not possible to use the wall law with 0 viscosity", "");
//slip condition
int slip_size = mSlipBoundaryList.size();
#pragma omp parallel for firstprivate(slip_size,B,density,mu,toll,ym,y_plus_incercept,itmax)
for (int i_slip = 0; i_slip < slip_size; i_slip++)
{
unsigned int i_node = mSlipBoundaryList[i_slip];
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& U_i = vel[i_node];
const array_1d<double, TDim>& an_i = mSlipNormal[i_node];
//compute the modulus of the velocity
double mod_vel = 0.0;
double area = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
{
mod_vel += U_i[comp] * U_i[comp];
area += an_i[comp] * an_i[comp];
}
mod_vel = sqrt(mod_vel);
area = sqrt(area);
//now compute the skin friction
double mod_uthaw = sqrt(mod_vel * mu / ym);
const double y_plus = ym * mod_uthaw / mu;
if (y_plus > y_plus_incercept)
{
//begin cicle to calculate the real u_thaw's module:
unsigned int it = 0;
double dx = 1e10;
// KRATOS_WATCH(fabs(dx));
while (fabs(dx) > toll * mod_uthaw && it < itmax)
{
double a = 1.0 / k;
double temp = a * log(ym * mod_uthaw / mu) + B;
double y = mod_uthaw * (temp) - mod_vel;
double y1 = temp + a;
dx = y / y1;
mod_uthaw -= dx;
it = it + 1;
}
// KRATOS_WATCH(toll*mod_uthaw);
// KRATOS_WATCH(area);
// KRATOS_WATCH(it);
if (it == itmax)
std::cout << "attention max number of iterations exceeded in wall law computation" << std::endl;
}
// else
// {
// for (unsigned int comp = 0; comp < TDim; comp++)
// rhs_i[comp] -= U_i[comp] * area * mu / (density*ym) ;
// }
if (mod_vel > 1e-12)
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] -= U_i[comp] * area * mod_uthaw * mod_uthaw * density / (mod_vel);
}
}
};
} //namespace Kratos
//#undef SYMM_PRESS
#endif //KRATOS_EDGEBASED_FLUID_SOLVER_H_INCLUDED defined
|
structDef.c |
typedef unsigned int size_t;
typedef unsigned char __u_char;
typedef unsigned short int __u_short;
typedef unsigned int __u_int;
typedef unsigned long int __u_long;
typedef signed char __int8_t;
typedef unsigned char __uint8_t;
typedef signed short int __int16_t;
typedef unsigned short int __uint16_t;
typedef signed int __int32_t;
typedef unsigned int __uint32_t;
__extension__ typedef signed long long int __int64_t;
__extension__ typedef unsigned long long int __uint64_t;
__extension__ typedef long long int __quad_t;
__extension__ typedef unsigned long long int __u_quad_t;
__extension__ typedef __u_quad_t __dev_t;
__extension__ typedef unsigned int __uid_t;
__extension__ typedef unsigned int __gid_t;
__extension__ typedef unsigned long int __ino_t;
__extension__ typedef __u_quad_t __ino64_t;
__extension__ typedef unsigned int __mode_t;
__extension__ typedef unsigned int __nlink_t;
__extension__ typedef long int __off_t;
__extension__ typedef __quad_t __off64_t;
__extension__ typedef int __pid_t;
__extension__ typedef struct { int __val[2]; } __fsid_t;
__extension__ typedef long int __clock_t;
__extension__ typedef unsigned long int __rlim_t;
__extension__ typedef __u_quad_t __rlim64_t;
__extension__ typedef unsigned int __id_t;
__extension__ typedef long int __time_t;
__extension__ typedef unsigned int __useconds_t;
__extension__ typedef long int __suseconds_t;
__extension__ typedef int __daddr_t;
__extension__ typedef int __key_t;
__extension__ typedef int __clockid_t;
__extension__ typedef void * __timer_t;
__extension__ typedef long int __blksize_t;
__extension__ typedef long int __blkcnt_t;
__extension__ typedef __quad_t __blkcnt64_t;
__extension__ typedef unsigned long int __fsblkcnt_t;
__extension__ typedef __u_quad_t __fsblkcnt64_t;
__extension__ typedef unsigned long int __fsfilcnt_t;
__extension__ typedef __u_quad_t __fsfilcnt64_t;
__extension__ typedef int __fsword_t;
__extension__ typedef int __ssize_t;
__extension__ typedef long int __syscall_slong_t;
__extension__ typedef unsigned long int __syscall_ulong_t;
typedef __off64_t __loff_t;
typedef __quad_t *__qaddr_t;
typedef char *__caddr_t;
__extension__ typedef int __intptr_t;
__extension__ typedef unsigned int __socklen_t;
struct _IO_FILE;
typedef struct _IO_FILE FILE;
typedef struct _IO_FILE __FILE;
typedef struct
{
int __count;
union
{
unsigned int __wch;
char __wchb[4];
} __value;
} __mbstate_t;
typedef struct
{
__off_t __pos;
__mbstate_t __state;
} _G_fpos_t;
typedef struct
{
__off64_t __pos;
__mbstate_t __state;
} _G_fpos64_t;
typedef __builtin_va_list __gnuc_va_list;
struct _IO_jump_t; struct _IO_FILE;
typedef void _IO_lock_t;
struct _IO_marker {
struct _IO_marker *_next;
struct _IO_FILE *_sbuf;
int _pos;
};
enum __codecvt_result
{
__codecvt_ok,
__codecvt_partial,
__codecvt_error,
__codecvt_noconv
};
struct _IO_FILE {
int _flags;
char* _IO_read_ptr;
char* _IO_read_end;
char* _IO_read_base;
char* _IO_write_base;
char* _IO_write_ptr;
char* _IO_write_end;
char* _IO_buf_base;
char* _IO_buf_end;
char *_IO_save_base;
char *_IO_backup_base;
char *_IO_save_end;
struct _IO_marker *_markers;
struct _IO_FILE *_chain;
int _fileno;
int _flags2;
__off_t _old_offset;
unsigned short _cur_column;
signed char _vtable_offset;
char _shortbuf[1];
_IO_lock_t *_lock;
__off64_t _offset;
void *__pad1;
void *__pad2;
void *__pad3;
void *__pad4;
size_t __pad5;
int _mode;
char _unused2[15 * sizeof (int) - 4 * sizeof (void *) - sizeof (size_t)];
};
typedef struct _IO_FILE _IO_FILE;
struct _IO_FILE_plus;
extern struct _IO_FILE_plus _IO_2_1_stdin_;
extern struct _IO_FILE_plus _IO_2_1_stdout_;
extern struct _IO_FILE_plus _IO_2_1_stderr_;
typedef __ssize_t __io_read_fn (void *__cookie, char *__buf, size_t __nbytes);
typedef __ssize_t __io_write_fn (void *__cookie, const char *__buf,
size_t __n);
typedef int __io_seek_fn (void *__cookie, __off64_t *__pos, int __w);
typedef int __io_close_fn (void *__cookie);
extern int __underflow (_IO_FILE *);
extern int __uflow (_IO_FILE *);
extern int __overflow (_IO_FILE *, int);
extern int _IO_getc (_IO_FILE *__fp);
extern int _IO_putc (int __c, _IO_FILE *__fp);
extern int _IO_feof (_IO_FILE *__fp) __attribute__ ((__nothrow__ , __leaf__));
extern int _IO_ferror (_IO_FILE *__fp) __attribute__ ((__nothrow__ , __leaf__));
extern int _IO_peekc_locked (_IO_FILE *__fp);
extern void _IO_flockfile (_IO_FILE *) __attribute__ ((__nothrow__ , __leaf__));
extern void _IO_funlockfile (_IO_FILE *) __attribute__ ((__nothrow__ , __leaf__));
extern int _IO_ftrylockfile (_IO_FILE *) __attribute__ ((__nothrow__ , __leaf__));
extern int _IO_vfscanf (_IO_FILE * __restrict, const char * __restrict,
__gnuc_va_list, int *__restrict);
extern int _IO_vfprintf (_IO_FILE *__restrict, const char *__restrict,
__gnuc_va_list);
extern __ssize_t _IO_padn (_IO_FILE *, int, __ssize_t);
extern size_t _IO_sgetn (_IO_FILE *, void *, size_t);
extern __off64_t _IO_seekoff (_IO_FILE *, __off64_t, int, int);
extern __off64_t _IO_seekpos (_IO_FILE *, __off64_t, int);
extern void _IO_free_backup_area (_IO_FILE *) __attribute__ ((__nothrow__ , __leaf__));
typedef __gnuc_va_list va_list;
typedef __off_t off_t;
typedef __ssize_t ssize_t;
typedef _G_fpos_t fpos_t;
extern struct _IO_FILE *stdin;
extern struct _IO_FILE *stdout;
extern struct _IO_FILE *stderr;
extern int remove (const char *__filename) __attribute__ ((__nothrow__ , __leaf__));
extern int rename (const char *__old, const char *__new) __attribute__ ((__nothrow__ , __leaf__));
extern int renameat (int __oldfd, const char *__old, int __newfd,
const char *__new) __attribute__ ((__nothrow__ , __leaf__));
extern FILE *tmpfile (void) ;
extern char *tmpnam (char *__s) __attribute__ ((__nothrow__ , __leaf__)) ;
extern char *tmpnam_r (char *__s) __attribute__ ((__nothrow__ , __leaf__)) ;
extern char *tempnam (const char *__dir, const char *__pfx)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__malloc__)) ;
extern int fclose (FILE *__stream);
extern int fflush (FILE *__stream);
extern int fflush_unlocked (FILE *__stream);
extern FILE *fopen (const char *__restrict __filename,
const char *__restrict __modes) ;
extern FILE *freopen (const char *__restrict __filename,
const char *__restrict __modes,
FILE *__restrict __stream) ;
extern FILE *fdopen (int __fd, const char *__modes) __attribute__ ((__nothrow__ , __leaf__)) ;
extern FILE *fmemopen (void *__s, size_t __len, const char *__modes)
__attribute__ ((__nothrow__ , __leaf__)) ;
extern FILE *open_memstream (char **__bufloc, size_t *__sizeloc) __attribute__ ((__nothrow__ , __leaf__)) ;
extern void setbuf (FILE *__restrict __stream, char *__restrict __buf) __attribute__ ((__nothrow__ , __leaf__));
extern int setvbuf (FILE *__restrict __stream, char *__restrict __buf,
int __modes, size_t __n) __attribute__ ((__nothrow__ , __leaf__));
extern void setbuffer (FILE *__restrict __stream, char *__restrict __buf,
size_t __size) __attribute__ ((__nothrow__ , __leaf__));
extern void setlinebuf (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__));
extern int fprintf (FILE *__restrict __stream,
const char *__restrict __format, ...);
extern int printf (const char *__restrict __format, ...);
extern int sprintf (char *__restrict __s,
const char *__restrict __format, ...) __attribute__ ((__nothrow__));
extern int vfprintf (FILE *__restrict __s, const char *__restrict __format,
__gnuc_va_list __arg);
extern int vprintf (const char *__restrict __format, __gnuc_va_list __arg);
extern int vsprintf (char *__restrict __s, const char *__restrict __format,
__gnuc_va_list __arg) __attribute__ ((__nothrow__));
extern int snprintf (char *__restrict __s, size_t __maxlen,
const char *__restrict __format, ...)
__attribute__ ((__nothrow__)) __attribute__ ((__format__ (__printf__, 3, 4)));
extern int vsnprintf (char *__restrict __s, size_t __maxlen,
const char *__restrict __format, __gnuc_va_list __arg)
__attribute__ ((__nothrow__)) __attribute__ ((__format__ (__printf__, 3, 0)));
extern int vdprintf (int __fd, const char *__restrict __fmt,
__gnuc_va_list __arg)
__attribute__ ((__format__ (__printf__, 2, 0)));
extern int dprintf (int __fd, const char *__restrict __fmt, ...)
__attribute__ ((__format__ (__printf__, 2, 3)));
extern int fscanf (FILE *__restrict __stream,
const char *__restrict __format, ...) ;
extern int scanf (const char *__restrict __format, ...) ;
extern int sscanf (const char *__restrict __s,
const char *__restrict __format, ...) __attribute__ ((__nothrow__ , __leaf__));
extern int fscanf (FILE *__restrict __stream, const char *__restrict __format, ...) __asm__ ("" "__isoc99_fscanf") ;
extern int scanf (const char *__restrict __format, ...) __asm__ ("" "__isoc99_scanf") ;
extern int sscanf (const char *__restrict __s, const char *__restrict __format, ...) __asm__ ("" "__isoc99_sscanf") __attribute__ ((__nothrow__ , __leaf__));
extern int vfscanf (FILE *__restrict __s, const char *__restrict __format,
__gnuc_va_list __arg)
__attribute__ ((__format__ (__scanf__, 2, 0))) ;
extern int vscanf (const char *__restrict __format, __gnuc_va_list __arg)
__attribute__ ((__format__ (__scanf__, 1, 0))) ;
extern int vsscanf (const char *__restrict __s,
const char *__restrict __format, __gnuc_va_list __arg)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__format__ (__scanf__, 2, 0)));
extern int vfscanf (FILE *__restrict __s, const char *__restrict __format, __gnuc_va_list __arg) __asm__ ("" "__isoc99_vfscanf")
__attribute__ ((__format__ (__scanf__, 2, 0))) ;
extern int vscanf (const char *__restrict __format, __gnuc_va_list __arg) __asm__ ("" "__isoc99_vscanf")
__attribute__ ((__format__ (__scanf__, 1, 0))) ;
extern int vsscanf (const char *__restrict __s, const char *__restrict __format, __gnuc_va_list __arg) __asm__ ("" "__isoc99_vsscanf") __attribute__ ((__nothrow__ , __leaf__))
__attribute__ ((__format__ (__scanf__, 2, 0)));
extern int fgetc (FILE *__stream);
extern int getc (FILE *__stream);
extern int getchar (void);
extern int getc_unlocked (FILE *__stream);
extern int getchar_unlocked (void);
extern int fgetc_unlocked (FILE *__stream);
extern int fputc (int __c, FILE *__stream);
extern int putc (int __c, FILE *__stream);
extern int putchar (int __c);
extern int fputc_unlocked (int __c, FILE *__stream);
extern int putc_unlocked (int __c, FILE *__stream);
extern int putchar_unlocked (int __c);
extern int getw (FILE *__stream);
extern int putw (int __w, FILE *__stream);
extern char *fgets (char *__restrict __s, int __n, FILE *__restrict __stream)
;
extern __ssize_t __getdelim (char **__restrict __lineptr,
size_t *__restrict __n, int __delimiter,
FILE *__restrict __stream) ;
extern __ssize_t getdelim (char **__restrict __lineptr,
size_t *__restrict __n, int __delimiter,
FILE *__restrict __stream) ;
extern __ssize_t getline (char **__restrict __lineptr,
size_t *__restrict __n,
FILE *__restrict __stream) ;
extern int fputs (const char *__restrict __s, FILE *__restrict __stream);
extern int puts (const char *__s);
extern int ungetc (int __c, FILE *__stream);
extern size_t fread (void *__restrict __ptr, size_t __size,
size_t __n, FILE *__restrict __stream) ;
extern size_t fwrite (const void *__restrict __ptr, size_t __size,
size_t __n, FILE *__restrict __s);
extern size_t fread_unlocked (void *__restrict __ptr, size_t __size,
size_t __n, FILE *__restrict __stream) ;
extern size_t fwrite_unlocked (const void *__restrict __ptr, size_t __size,
size_t __n, FILE *__restrict __stream);
extern int fseek (FILE *__stream, long int __off, int __whence);
extern long int ftell (FILE *__stream) ;
extern void rewind (FILE *__stream);
extern int fseeko (FILE *__stream, __off_t __off, int __whence);
extern __off_t ftello (FILE *__stream) ;
extern int fgetpos (FILE *__restrict __stream, fpos_t *__restrict __pos);
extern int fsetpos (FILE *__stream, const fpos_t *__pos);
extern void clearerr (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__));
extern int feof (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)) ;
extern int ferror (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)) ;
extern void clearerr_unlocked (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__));
extern int feof_unlocked (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)) ;
extern int ferror_unlocked (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)) ;
extern void perror (const char *__s);
extern int sys_nerr;
extern const char *const sys_errlist[];
extern int fileno (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)) ;
extern int fileno_unlocked (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)) ;
extern FILE *popen (const char *__command, const char *__modes) ;
extern int pclose (FILE *__stream);
extern char *ctermid (char *__s) __attribute__ ((__nothrow__ , __leaf__));
extern void flockfile (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__));
extern int ftrylockfile (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__)) ;
extern void funlockfile (FILE *__stream) __attribute__ ((__nothrow__ , __leaf__));
typedef unsigned int wchar_t;
typedef enum
{
P_ALL,
P_PID,
P_PGID
} idtype_t;
/* Reverse the byte order of a 32-bit value (endianness swap).
   Written out as explicit shift/mask operations; equivalent to the
   compiler builtin __builtin_bswap32 used originally.  */
static __inline unsigned int
__bswap_32 (unsigned int __bsx)
{
  return ((__bsx & 0x000000ffu) << 24)
       | ((__bsx & 0x0000ff00u) << 8)
       | ((__bsx & 0x00ff0000u) >> 8)
       | ((__bsx & 0xff000000u) >> 24);
}
/* Reverse the byte order of a 64-bit value (endianness swap); thin
   wrapper over the compiler builtin.  __uint64_t is typedef'd earlier
   in this file as unsigned long long int.  */
static __inline __uint64_t
__bswap_64 (__uint64_t __bsx)
{
return __builtin_bswap64 (__bsx);
}
union wait
{
int w_status;
struct
{
unsigned int __w_termsig:7;
unsigned int __w_coredump:1;
unsigned int __w_retcode:8;
unsigned int:16;
} __wait_terminated;
struct
{
unsigned int __w_stopval:8;
unsigned int __w_stopsig:8;
unsigned int:16;
} __wait_stopped;
};
typedef union
{
union wait *__uptr;
int *__iptr;
} __WAIT_STATUS __attribute__ ((__transparent_union__));
typedef struct
{
int quot;
int rem;
} div_t;
typedef struct
{
long int quot;
long int rem;
} ldiv_t;
__extension__ typedef struct
{
long long int quot;
long long int rem;
} lldiv_t;
extern size_t __ctype_get_mb_cur_max (void) __attribute__ ((__nothrow__ , __leaf__)) ;
extern double atof (const char *__nptr)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1))) ;
extern int atoi (const char *__nptr)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1))) ;
extern long int atol (const char *__nptr)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1))) ;
__extension__ extern long long int atoll (const char *__nptr)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1))) ;
extern double strtod (const char *__restrict __nptr,
char **__restrict __endptr)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern float strtof (const char *__restrict __nptr,
char **__restrict __endptr) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern long double strtold (const char *__restrict __nptr,
char **__restrict __endptr)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern long int strtol (const char *__restrict __nptr,
char **__restrict __endptr, int __base)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern unsigned long int strtoul (const char *__restrict __nptr,
char **__restrict __endptr, int __base)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
__extension__
extern long long int strtoq (const char *__restrict __nptr,
char **__restrict __endptr, int __base)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
__extension__
extern unsigned long long int strtouq (const char *__restrict __nptr,
char **__restrict __endptr, int __base)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
__extension__
extern long long int strtoll (const char *__restrict __nptr,
char **__restrict __endptr, int __base)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
__extension__
extern unsigned long long int strtoull (const char *__restrict __nptr,
char **__restrict __endptr, int __base)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern char *l64a (long int __n) __attribute__ ((__nothrow__ , __leaf__)) ;
extern long int a64l (const char *__s)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1))) ;
typedef __u_char u_char;
typedef __u_short u_short;
typedef __u_int u_int;
typedef __u_long u_long;
typedef __quad_t quad_t;
typedef __u_quad_t u_quad_t;
typedef __fsid_t fsid_t;
typedef __loff_t loff_t;
typedef __ino_t ino_t;
typedef __dev_t dev_t;
typedef __gid_t gid_t;
typedef __mode_t mode_t;
typedef __nlink_t nlink_t;
typedef __uid_t uid_t;
typedef __pid_t pid_t;
typedef __id_t id_t;
typedef __daddr_t daddr_t;
typedef __caddr_t caddr_t;
typedef __key_t key_t;
typedef __clock_t clock_t;
typedef __time_t time_t;
typedef __clockid_t clockid_t;
typedef __timer_t timer_t;
typedef unsigned long int ulong;
typedef unsigned short int ushort;
typedef unsigned int uint;
typedef int int8_t __attribute__ ((__mode__ (__QI__)));
typedef int int16_t __attribute__ ((__mode__ (__HI__)));
typedef int int32_t __attribute__ ((__mode__ (__SI__)));
typedef int int64_t __attribute__ ((__mode__ (__DI__)));
typedef unsigned int u_int8_t __attribute__ ((__mode__ (__QI__)));
typedef unsigned int u_int16_t __attribute__ ((__mode__ (__HI__)));
typedef unsigned int u_int32_t __attribute__ ((__mode__ (__SI__)));
typedef unsigned int u_int64_t __attribute__ ((__mode__ (__DI__)));
typedef int register_t __attribute__ ((__mode__ (__word__)));
typedef int __sig_atomic_t;
typedef struct
{
unsigned long int __val[(1024 / (8 * sizeof (unsigned long int)))];
} __sigset_t;
typedef __sigset_t sigset_t;
struct timespec
{
__time_t tv_sec;
__syscall_slong_t tv_nsec;
};
struct timeval
{
__time_t tv_sec;
__suseconds_t tv_usec;
};
typedef __suseconds_t suseconds_t;
typedef long int __fd_mask;
typedef struct
{
__fd_mask __fds_bits[1024 / (8 * (int) sizeof (__fd_mask))];
} fd_set;
typedef __fd_mask fd_mask;
extern int select (int __nfds, fd_set *__restrict __readfds,
fd_set *__restrict __writefds,
fd_set *__restrict __exceptfds,
struct timeval *__restrict __timeout);
extern int pselect (int __nfds, fd_set *__restrict __readfds,
fd_set *__restrict __writefds,
fd_set *__restrict __exceptfds,
const struct timespec *__restrict __timeout,
const __sigset_t *__restrict __sigmask);
__extension__
extern unsigned int gnu_dev_major (unsigned long long int __dev)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
__extension__
extern unsigned int gnu_dev_minor (unsigned long long int __dev)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
__extension__
extern unsigned long long int gnu_dev_makedev (unsigned int __major,
unsigned int __minor)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
typedef __blksize_t blksize_t;
typedef __blkcnt_t blkcnt_t;
typedef __fsblkcnt_t fsblkcnt_t;
typedef __fsfilcnt_t fsfilcnt_t;
typedef unsigned long int pthread_t;
union pthread_attr_t
{
char __size[36];
long int __align;
};
typedef union pthread_attr_t pthread_attr_t;
typedef struct __pthread_internal_slist
{
struct __pthread_internal_slist *__next;
} __pthread_slist_t;
typedef union
{
struct __pthread_mutex_s
{
int __lock;
unsigned int __count;
int __owner;
int __kind;
unsigned int __nusers;
} __data;
char __size[24];
long int __align;
} pthread_mutex_t;
typedef union
{
char __size[4];
long int __align;
} pthread_mutexattr_t;
typedef union
{
struct
{
int __lock;
unsigned int __futex;
__extension__ unsigned long long int __total_seq;
__extension__ unsigned long long int __wakeup_seq;
__extension__ unsigned long long int __woken_seq;
void *__mutex;
unsigned int __nwaiters;
unsigned int __broadcast_seq;
} __data;
char __size[48];
__extension__ long long int __align;
} pthread_cond_t;
typedef union
{
char __size[4];
long int __align;
} pthread_condattr_t;
typedef unsigned int pthread_key_t;
typedef int pthread_once_t;
typedef union
{
struct
{
int __lock;
unsigned int __nr_readers;
unsigned int __readers_wakeup;
unsigned int __writer_wakeup;
unsigned int __nr_readers_queued;
unsigned int __nr_writers_queued;
unsigned char __flags;
unsigned char __shared;
unsigned char __pad1;
unsigned char __pad2;
int __writer;
} __data;
char __size[32];
long int __align;
} pthread_rwlock_t;
typedef union
{
char __size[8];
long int __align;
} pthread_rwlockattr_t;
typedef volatile int pthread_spinlock_t;
typedef union
{
char __size[20];
long int __align;
} pthread_barrier_t;
typedef union
{
char __size[4];
int __align;
} pthread_barrierattr_t;
extern long int random (void) __attribute__ ((__nothrow__ , __leaf__));
extern void srandom (unsigned int __seed) __attribute__ ((__nothrow__ , __leaf__));
extern char *initstate (unsigned int __seed, char *__statebuf,
size_t __statelen) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2)));
extern char *setstate (char *__statebuf) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
struct random_data
{
int32_t *fptr;
int32_t *rptr;
int32_t *state;
int rand_type;
int rand_deg;
int rand_sep;
int32_t *end_ptr;
};
extern int random_r (struct random_data *__restrict __buf,
int32_t *__restrict __result) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int srandom_r (unsigned int __seed, struct random_data *__buf)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2)));
extern int initstate_r (unsigned int __seed, char *__restrict __statebuf,
size_t __statelen,
struct random_data *__restrict __buf)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2, 4)));
extern int setstate_r (char *__restrict __statebuf,
struct random_data *__restrict __buf)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int rand (void) __attribute__ ((__nothrow__ , __leaf__));
extern void srand (unsigned int __seed) __attribute__ ((__nothrow__ , __leaf__));
extern int rand_r (unsigned int *__seed) __attribute__ ((__nothrow__ , __leaf__));
extern double drand48 (void) __attribute__ ((__nothrow__ , __leaf__));
extern double erand48 (unsigned short int __xsubi[3]) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern long int lrand48 (void) __attribute__ ((__nothrow__ , __leaf__));
extern long int nrand48 (unsigned short int __xsubi[3])
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern long int mrand48 (void) __attribute__ ((__nothrow__ , __leaf__));
extern long int jrand48 (unsigned short int __xsubi[3])
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern void srand48 (long int __seedval) __attribute__ ((__nothrow__ , __leaf__));
extern unsigned short int *seed48 (unsigned short int __seed16v[3])
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern void lcong48 (unsigned short int __param[7]) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
struct drand48_data
{
unsigned short int __x[3];
unsigned short int __old_x[3];
unsigned short int __c;
unsigned short int __init;
__extension__ unsigned long long int __a;
};
extern int drand48_r (struct drand48_data *__restrict __buffer,
double *__restrict __result) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int erand48_r (unsigned short int __xsubi[3],
struct drand48_data *__restrict __buffer,
double *__restrict __result) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int lrand48_r (struct drand48_data *__restrict __buffer,
long int *__restrict __result)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int nrand48_r (unsigned short int __xsubi[3],
struct drand48_data *__restrict __buffer,
long int *__restrict __result)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int mrand48_r (struct drand48_data *__restrict __buffer,
long int *__restrict __result)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int jrand48_r (unsigned short int __xsubi[3],
struct drand48_data *__restrict __buffer,
long int *__restrict __result)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int srand48_r (long int __seedval, struct drand48_data *__buffer)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2)));
extern int seed48_r (unsigned short int __seed16v[3],
struct drand48_data *__buffer) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int lcong48_r (unsigned short int __param[7],
struct drand48_data *__buffer)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern void *malloc (size_t __size) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__malloc__)) ;
extern void *calloc (size_t __nmemb, size_t __size)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__malloc__)) ;
extern void *realloc (void *__ptr, size_t __size)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__warn_unused_result__));
extern void free (void *__ptr) __attribute__ ((__nothrow__ , __leaf__));
extern void cfree (void *__ptr) __attribute__ ((__nothrow__ , __leaf__));
extern void *alloca (size_t __size) __attribute__ ((__nothrow__ , __leaf__));
extern void *valloc (size_t __size) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__malloc__)) ;
extern int posix_memalign (void **__memptr, size_t __alignment, size_t __size)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) ;
extern void *aligned_alloc (size_t __alignment, size_t __size)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__malloc__)) __attribute__ ((__alloc_size__ (2))) ;
extern void abort (void) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__noreturn__));
extern int atexit (void (*__func) (void)) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern int at_quick_exit (void (*__func) (void)) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern int on_exit (void (*__func) (int __status, void *__arg), void *__arg)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern void exit (int __status) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__noreturn__));
extern void quick_exit (int __status) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__noreturn__));
extern void _Exit (int __status) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__noreturn__));
extern char *getenv (const char *__name) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) ;
extern int putenv (char *__string) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern int setenv (const char *__name, const char *__value, int __replace)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2)));
extern int unsetenv (const char *__name) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern int clearenv (void) __attribute__ ((__nothrow__ , __leaf__));
extern char *mktemp (char *__template) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern int mkstemp (char *__template) __attribute__ ((__nonnull__ (1))) ;
extern int mkstemps (char *__template, int __suffixlen) __attribute__ ((__nonnull__ (1))) ;
extern char *mkdtemp (char *__template) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) ;
extern int system (const char *__command) ;
extern char *realpath (const char *__restrict __name,
char *__restrict __resolved) __attribute__ ((__nothrow__ , __leaf__)) ;
typedef int (*__compar_fn_t) (const void *, const void *);
extern void *bsearch (const void *__key, const void *__base,
size_t __nmemb, size_t __size, __compar_fn_t __compar)
__attribute__ ((__nonnull__ (1, 2, 5))) ;
extern void qsort (void *__base, size_t __nmemb, size_t __size,
__compar_fn_t __compar) __attribute__ ((__nonnull__ (1, 4)));
extern int abs (int __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)) ;
extern long int labs (long int __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)) ;
__extension__ extern long long int llabs (long long int __x)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)) ;
extern div_t div (int __numer, int __denom)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)) ;
extern ldiv_t ldiv (long int __numer, long int __denom)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)) ;
__extension__ extern lldiv_t lldiv (long long int __numer,
long long int __denom)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)) ;
extern char *ecvt (double __value, int __ndigit, int *__restrict __decpt,
int *__restrict __sign) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (3, 4))) ;
extern char *fcvt (double __value, int __ndigit, int *__restrict __decpt,
int *__restrict __sign) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (3, 4))) ;
extern char *gcvt (double __value, int __ndigit, char *__buf)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (3))) ;
extern char *qecvt (long double __value, int __ndigit,
int *__restrict __decpt, int *__restrict __sign)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (3, 4))) ;
extern char *qfcvt (long double __value, int __ndigit,
int *__restrict __decpt, int *__restrict __sign)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (3, 4))) ;
extern char *qgcvt (long double __value, int __ndigit, char *__buf)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (3))) ;
extern int ecvt_r (double __value, int __ndigit, int *__restrict __decpt,
int *__restrict __sign, char *__restrict __buf,
size_t __len) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (3, 4, 5)));
extern int fcvt_r (double __value, int __ndigit, int *__restrict __decpt,
int *__restrict __sign, char *__restrict __buf,
size_t __len) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (3, 4, 5)));
extern int qecvt_r (long double __value, int __ndigit,
int *__restrict __decpt, int *__restrict __sign,
char *__restrict __buf, size_t __len)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (3, 4, 5)));
extern int qfcvt_r (long double __value, int __ndigit,
int *__restrict __decpt, int *__restrict __sign,
char *__restrict __buf, size_t __len)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (3, 4, 5)));
extern int mblen (const char *__s, size_t __n) __attribute__ ((__nothrow__ , __leaf__));
extern int mbtowc (wchar_t *__restrict __pwc,
const char *__restrict __s, size_t __n) __attribute__ ((__nothrow__ , __leaf__));
extern int wctomb (char *__s, wchar_t __wchar) __attribute__ ((__nothrow__ , __leaf__));
extern size_t mbstowcs (wchar_t *__restrict __pwcs,
const char *__restrict __s, size_t __n) __attribute__ ((__nothrow__ , __leaf__));
extern size_t wcstombs (char *__restrict __s,
const wchar_t *__restrict __pwcs, size_t __n)
__attribute__ ((__nothrow__ , __leaf__));
extern int rpmatch (const char *__response) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) ;
extern int getsubopt (char **__restrict __optionp,
char *const *__restrict __tokens,
char **__restrict __valuep)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2, 3))) ;
extern int getloadavg (double __loadavg[], int __nelem)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
typedef float float_t;
typedef double double_t;
extern double acos (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __acos (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double asin (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __asin (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double atan (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __atan (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double atan2 (double __y, double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __atan2 (double __y, double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double cos (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __cos (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double sin (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __sin (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double tan (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __tan (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double cosh (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __cosh (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double sinh (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __sinh (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double tanh (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __tanh (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double acosh (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __acosh (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double asinh (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __asinh (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double atanh (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __atanh (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double exp (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __exp (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double frexp (double __x, int *__exponent) __attribute__ ((__nothrow__ , __leaf__)); extern double __frexp (double __x, int *__exponent) __attribute__ ((__nothrow__ , __leaf__));
extern double ldexp (double __x, int __exponent) __attribute__ ((__nothrow__ , __leaf__)); extern double __ldexp (double __x, int __exponent) __attribute__ ((__nothrow__ , __leaf__));
extern double log (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __log (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double log10 (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __log10 (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double modf (double __x, double *__iptr) __attribute__ ((__nothrow__ , __leaf__)); extern double __modf (double __x, double *__iptr) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2)));
extern double expm1 (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __expm1 (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double log1p (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __log1p (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double logb (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __logb (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double exp2 (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __exp2 (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double log2 (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __log2 (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double pow (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__)); extern double __pow (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__));
extern double sqrt (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __sqrt (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double hypot (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__)); extern double __hypot (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__));
extern double cbrt (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __cbrt (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double ceil (double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern double __ceil (double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern double fabs (double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern double __fabs (double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern double floor (double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern double __floor (double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern double fmod (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__)); extern double __fmod (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__));
extern int __isinf (double __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int __finite (double __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int isinf (double __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int finite (double __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern double drem (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__)); extern double __drem (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__));
extern double significand (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __significand (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double copysign (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern double __copysign (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern double nan (const char *__tagb) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern double __nan (const char *__tagb) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int __isnan (double __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int isnan (double __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern double j0 (double) __attribute__ ((__nothrow__ , __leaf__)); extern double __j0 (double) __attribute__ ((__nothrow__ , __leaf__));
extern double j1 (double) __attribute__ ((__nothrow__ , __leaf__)); extern double __j1 (double) __attribute__ ((__nothrow__ , __leaf__));
extern double jn (int, double) __attribute__ ((__nothrow__ , __leaf__)); extern double __jn (int, double) __attribute__ ((__nothrow__ , __leaf__));
extern double y0 (double) __attribute__ ((__nothrow__ , __leaf__)); extern double __y0 (double) __attribute__ ((__nothrow__ , __leaf__));
extern double y1 (double) __attribute__ ((__nothrow__ , __leaf__)); extern double __y1 (double) __attribute__ ((__nothrow__ , __leaf__));
extern double yn (int, double) __attribute__ ((__nothrow__ , __leaf__)); extern double __yn (int, double) __attribute__ ((__nothrow__ , __leaf__));
extern double erf (double) __attribute__ ((__nothrow__ , __leaf__)); extern double __erf (double) __attribute__ ((__nothrow__ , __leaf__));
extern double erfc (double) __attribute__ ((__nothrow__ , __leaf__)); extern double __erfc (double) __attribute__ ((__nothrow__ , __leaf__));
extern double lgamma (double) __attribute__ ((__nothrow__ , __leaf__)); extern double __lgamma (double) __attribute__ ((__nothrow__ , __leaf__));
extern double tgamma (double) __attribute__ ((__nothrow__ , __leaf__)); extern double __tgamma (double) __attribute__ ((__nothrow__ , __leaf__));
extern double gamma (double) __attribute__ ((__nothrow__ , __leaf__)); extern double __gamma (double) __attribute__ ((__nothrow__ , __leaf__));
extern double lgamma_r (double, int *__signgamp) __attribute__ ((__nothrow__ , __leaf__)); extern double __lgamma_r (double, int *__signgamp) __attribute__ ((__nothrow__ , __leaf__));
extern double rint (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __rint (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double nextafter (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern double __nextafter (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern double nexttoward (double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern double __nexttoward (double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern double remainder (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__)); extern double __remainder (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__));
extern double scalbn (double __x, int __n) __attribute__ ((__nothrow__ , __leaf__)); extern double __scalbn (double __x, int __n) __attribute__ ((__nothrow__ , __leaf__));
extern int ilogb (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern int __ilogb (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double scalbln (double __x, long int __n) __attribute__ ((__nothrow__ , __leaf__)); extern double __scalbln (double __x, long int __n) __attribute__ ((__nothrow__ , __leaf__));
extern double nearbyint (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern double __nearbyint (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double round (double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern double __round (double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern double trunc (double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern double __trunc (double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern double remquo (double __x, double __y, int *__quo) __attribute__ ((__nothrow__ , __leaf__)); extern double __remquo (double __x, double __y, int *__quo) __attribute__ ((__nothrow__ , __leaf__));
extern long int lrint (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long int __lrint (double __x) __attribute__ ((__nothrow__ , __leaf__));
__extension__
extern long long int llrint (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long long int __llrint (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long int lround (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long int __lround (double __x) __attribute__ ((__nothrow__ , __leaf__));
__extension__
extern long long int llround (double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long long int __llround (double __x) __attribute__ ((__nothrow__ , __leaf__));
extern double fdim (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__)); extern double __fdim (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__));
extern double fmax (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern double __fmax (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern double fmin (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern double __fmin (double __x, double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int __fpclassify (double __value) __attribute__ ((__nothrow__ , __leaf__))
__attribute__ ((__const__));
extern int __signbit (double __value) __attribute__ ((__nothrow__ , __leaf__))
__attribute__ ((__const__));
extern double fma (double __x, double __y, double __z) __attribute__ ((__nothrow__ , __leaf__)); extern double __fma (double __x, double __y, double __z) __attribute__ ((__nothrow__ , __leaf__));
extern double scalb (double __x, double __n) __attribute__ ((__nothrow__ , __leaf__)); extern double __scalb (double __x, double __n) __attribute__ ((__nothrow__ , __leaf__));
extern float acosf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __acosf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float asinf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __asinf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float atanf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __atanf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float atan2f (float __y, float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __atan2f (float __y, float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float cosf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __cosf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float sinf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __sinf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float tanf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __tanf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float coshf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __coshf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float sinhf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __sinhf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float tanhf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __tanhf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float acoshf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __acoshf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float asinhf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __asinhf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float atanhf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __atanhf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float expf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __expf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float frexpf (float __x, int *__exponent) __attribute__ ((__nothrow__ , __leaf__)); extern float __frexpf (float __x, int *__exponent) __attribute__ ((__nothrow__ , __leaf__));
extern float ldexpf (float __x, int __exponent) __attribute__ ((__nothrow__ , __leaf__)); extern float __ldexpf (float __x, int __exponent) __attribute__ ((__nothrow__ , __leaf__));
extern float logf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __logf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float log10f (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __log10f (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float modff (float __x, float *__iptr) __attribute__ ((__nothrow__ , __leaf__)); extern float __modff (float __x, float *__iptr) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2)));
extern float expm1f (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __expm1f (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float log1pf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __log1pf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float logbf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __logbf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float exp2f (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __exp2f (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float log2f (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __log2f (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float powf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__)); extern float __powf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__));
extern float sqrtf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __sqrtf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float hypotf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__)); extern float __hypotf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__));
extern float cbrtf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __cbrtf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float ceilf (float __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern float __ceilf (float __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern float fabsf (float __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern float __fabsf (float __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern float floorf (float __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern float __floorf (float __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern float fmodf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__)); extern float __fmodf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__));
extern int __isinff (float __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int __finitef (float __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int isinff (float __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int finitef (float __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern float dremf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__)); extern float __dremf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__));
extern float significandf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __significandf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float copysignf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern float __copysignf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern float nanf (const char *__tagb) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern float __nanf (const char *__tagb) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int __isnanf (float __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int isnanf (float __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern float j0f (float) __attribute__ ((__nothrow__ , __leaf__)); extern float __j0f (float) __attribute__ ((__nothrow__ , __leaf__));
extern float j1f (float) __attribute__ ((__nothrow__ , __leaf__)); extern float __j1f (float) __attribute__ ((__nothrow__ , __leaf__));
extern float jnf (int, float) __attribute__ ((__nothrow__ , __leaf__)); extern float __jnf (int, float) __attribute__ ((__nothrow__ , __leaf__));
extern float y0f (float) __attribute__ ((__nothrow__ , __leaf__)); extern float __y0f (float) __attribute__ ((__nothrow__ , __leaf__));
extern float y1f (float) __attribute__ ((__nothrow__ , __leaf__)); extern float __y1f (float) __attribute__ ((__nothrow__ , __leaf__));
extern float ynf (int, float) __attribute__ ((__nothrow__ , __leaf__)); extern float __ynf (int, float) __attribute__ ((__nothrow__ , __leaf__));
extern float erff (float) __attribute__ ((__nothrow__ , __leaf__)); extern float __erff (float) __attribute__ ((__nothrow__ , __leaf__));
extern float erfcf (float) __attribute__ ((__nothrow__ , __leaf__)); extern float __erfcf (float) __attribute__ ((__nothrow__ , __leaf__));
extern float lgammaf (float) __attribute__ ((__nothrow__ , __leaf__)); extern float __lgammaf (float) __attribute__ ((__nothrow__ , __leaf__));
extern float tgammaf (float) __attribute__ ((__nothrow__ , __leaf__)); extern float __tgammaf (float) __attribute__ ((__nothrow__ , __leaf__));
extern float gammaf (float) __attribute__ ((__nothrow__ , __leaf__)); extern float __gammaf (float) __attribute__ ((__nothrow__ , __leaf__));
extern float lgammaf_r (float, int *__signgamp) __attribute__ ((__nothrow__ , __leaf__)); extern float __lgammaf_r (float, int *__signgamp) __attribute__ ((__nothrow__ , __leaf__));
extern float rintf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __rintf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float nextafterf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern float __nextafterf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern float nexttowardf (float __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern float __nexttowardf (float __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern float remainderf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__)); extern float __remainderf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__));
extern float scalbnf (float __x, int __n) __attribute__ ((__nothrow__ , __leaf__)); extern float __scalbnf (float __x, int __n) __attribute__ ((__nothrow__ , __leaf__));
extern int ilogbf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern int __ilogbf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float scalblnf (float __x, long int __n) __attribute__ ((__nothrow__ , __leaf__)); extern float __scalblnf (float __x, long int __n) __attribute__ ((__nothrow__ , __leaf__));
extern float nearbyintf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern float __nearbyintf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float roundf (float __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern float __roundf (float __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern float truncf (float __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern float __truncf (float __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern float remquof (float __x, float __y, int *__quo) __attribute__ ((__nothrow__ , __leaf__)); extern float __remquof (float __x, float __y, int *__quo) __attribute__ ((__nothrow__ , __leaf__));
extern long int lrintf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern long int __lrintf (float __x) __attribute__ ((__nothrow__ , __leaf__));
__extension__
extern long long int llrintf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern long long int __llrintf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern long int lroundf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern long int __lroundf (float __x) __attribute__ ((__nothrow__ , __leaf__));
__extension__
extern long long int llroundf (float __x) __attribute__ ((__nothrow__ , __leaf__)); extern long long int __llroundf (float __x) __attribute__ ((__nothrow__ , __leaf__));
extern float fdimf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__)); extern float __fdimf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__));
extern float fmaxf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern float __fmaxf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern float fminf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern float __fminf (float __x, float __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int __fpclassifyf (float __value) __attribute__ ((__nothrow__ , __leaf__))
__attribute__ ((__const__));
extern int __signbitf (float __value) __attribute__ ((__nothrow__ , __leaf__))
__attribute__ ((__const__));
extern float fmaf (float __x, float __y, float __z) __attribute__ ((__nothrow__ , __leaf__)); extern float __fmaf (float __x, float __y, float __z) __attribute__ ((__nothrow__ , __leaf__));
extern float scalbf (float __x, float __n) __attribute__ ((__nothrow__ , __leaf__)); extern float __scalbf (float __x, float __n) __attribute__ ((__nothrow__ , __leaf__));
extern long double acosl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __acosl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double asinl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __asinl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double atanl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __atanl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double atan2l (long double __y, long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __atan2l (long double __y, long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double cosl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __cosl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double sinl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __sinl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double tanl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __tanl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double coshl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __coshl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double sinhl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __sinhl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double tanhl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __tanhl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double acoshl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __acoshl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double asinhl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __asinhl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double atanhl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __atanhl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double expl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __expl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double frexpl (long double __x, int *__exponent) __attribute__ ((__nothrow__ , __leaf__)); extern long double __frexpl (long double __x, int *__exponent) __attribute__ ((__nothrow__ , __leaf__));
extern long double ldexpl (long double __x, int __exponent) __attribute__ ((__nothrow__ , __leaf__)); extern long double __ldexpl (long double __x, int __exponent) __attribute__ ((__nothrow__ , __leaf__));
extern long double logl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __logl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double log10l (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __log10l (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double modfl (long double __x, long double *__iptr) __attribute__ ((__nothrow__ , __leaf__)); extern long double __modfl (long double __x, long double *__iptr) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2)));
extern long double expm1l (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __expm1l (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double log1pl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __log1pl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double logbl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __logbl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double exp2l (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __exp2l (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double log2l (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __log2l (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double powl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)); extern long double __powl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__));
extern long double sqrtl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __sqrtl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double hypotl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)); extern long double __hypotl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__));
extern long double cbrtl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __cbrtl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double ceill (long double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern long double __ceill (long double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern long double fabsl (long double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern long double __fabsl (long double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern long double floorl (long double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern long double __floorl (long double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern long double fmodl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)); extern long double __fmodl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__));
extern int __isinfl (long double __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int __finitel (long double __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int isinfl (long double __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int finitel (long double __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern long double dreml (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)); extern long double __dreml (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__));
extern long double significandl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __significandl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double copysignl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern long double __copysignl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern long double nanl (const char *__tagb) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern long double __nanl (const char *__tagb) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int __isnanl (long double __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int isnanl (long double __value) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern long double j0l (long double) __attribute__ ((__nothrow__ , __leaf__)); extern long double __j0l (long double) __attribute__ ((__nothrow__ , __leaf__));
extern long double j1l (long double) __attribute__ ((__nothrow__ , __leaf__)); extern long double __j1l (long double) __attribute__ ((__nothrow__ , __leaf__));
extern long double jnl (int, long double) __attribute__ ((__nothrow__ , __leaf__)); extern long double __jnl (int, long double) __attribute__ ((__nothrow__ , __leaf__));
extern long double y0l (long double) __attribute__ ((__nothrow__ , __leaf__)); extern long double __y0l (long double) __attribute__ ((__nothrow__ , __leaf__));
extern long double y1l (long double) __attribute__ ((__nothrow__ , __leaf__)); extern long double __y1l (long double) __attribute__ ((__nothrow__ , __leaf__));
extern long double ynl (int, long double) __attribute__ ((__nothrow__ , __leaf__)); extern long double __ynl (int, long double) __attribute__ ((__nothrow__ , __leaf__));
extern long double erfl (long double) __attribute__ ((__nothrow__ , __leaf__)); extern long double __erfl (long double) __attribute__ ((__nothrow__ , __leaf__));
extern long double erfcl (long double) __attribute__ ((__nothrow__ , __leaf__)); extern long double __erfcl (long double) __attribute__ ((__nothrow__ , __leaf__));
extern long double lgammal (long double) __attribute__ ((__nothrow__ , __leaf__)); extern long double __lgammal (long double) __attribute__ ((__nothrow__ , __leaf__));
extern long double tgammal (long double) __attribute__ ((__nothrow__ , __leaf__)); extern long double __tgammal (long double) __attribute__ ((__nothrow__ , __leaf__));
extern long double gammal (long double) __attribute__ ((__nothrow__ , __leaf__)); extern long double __gammal (long double) __attribute__ ((__nothrow__ , __leaf__));
extern long double lgammal_r (long double, int *__signgamp) __attribute__ ((__nothrow__ , __leaf__)); extern long double __lgammal_r (long double, int *__signgamp) __attribute__ ((__nothrow__ , __leaf__));
extern long double rintl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __rintl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double nextafterl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern long double __nextafterl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern long double nexttowardl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern long double __nexttowardl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern long double remainderl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)); extern long double __remainderl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__));
extern long double scalbnl (long double __x, int __n) __attribute__ ((__nothrow__ , __leaf__)); extern long double __scalbnl (long double __x, int __n) __attribute__ ((__nothrow__ , __leaf__));
extern int ilogbl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern int __ilogbl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double scalblnl (long double __x, long int __n) __attribute__ ((__nothrow__ , __leaf__)); extern long double __scalblnl (long double __x, long int __n) __attribute__ ((__nothrow__ , __leaf__));
extern long double nearbyintl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long double __nearbyintl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double roundl (long double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern long double __roundl (long double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern long double truncl (long double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern long double __truncl (long double __x) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern long double remquol (long double __x, long double __y, int *__quo) __attribute__ ((__nothrow__ , __leaf__)); extern long double __remquol (long double __x, long double __y, int *__quo) __attribute__ ((__nothrow__ , __leaf__));
extern long int lrintl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long int __lrintl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
__extension__
extern long long int llrintl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long long int __llrintl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long int lroundl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long int __lroundl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
__extension__
extern long long int llroundl (long double __x) __attribute__ ((__nothrow__ , __leaf__)); extern long long int __llroundl (long double __x) __attribute__ ((__nothrow__ , __leaf__));
extern long double fdiml (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)); extern long double __fdiml (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__));
extern long double fmaxl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern long double __fmaxl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern long double fminl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern long double __fminl (long double __x, long double __y) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int __fpclassifyl (long double __value) __attribute__ ((__nothrow__ , __leaf__))
__attribute__ ((__const__));
extern int __signbitl (long double __value) __attribute__ ((__nothrow__ , __leaf__))
__attribute__ ((__const__));
extern long double fmal (long double __x, long double __y, long double __z) __attribute__ ((__nothrow__ , __leaf__)); extern long double __fmal (long double __x, long double __y, long double __z) __attribute__ ((__nothrow__ , __leaf__));
extern long double scalbl (long double __x, long double __n) __attribute__ ((__nothrow__ , __leaf__)); extern long double __scalbl (long double __x, long double __n) __attribute__ ((__nothrow__ , __leaf__));
extern int signgam;
/* Floating-point classification constants: the results of the
   __fpclassify*() helpers declared above, consumed by the
   fpclassify() macro.  */
enum
{
FP_NAN =
0, /* not a number */
FP_INFINITE =
1, /* positive or negative infinity */
FP_ZERO =
2, /* positive or negative zero */
FP_SUBNORMAL =
3, /* subnormal (denormalized) value */
FP_NORMAL =
4 /* normal finite value */
};
/* Legacy SVID libm error-handling mode selector; the names mirror the
   standards whose error semantics each mode follows.  */
typedef enum
{
_IEEE_ = -1, /* IEEE 754 behavior: no matherr()/errno reporting */
_SVID_,      /* System V behavior */
_XOPEN_,     /* X/Open behavior */
_POSIX_,     /* POSIX behavior */
_ISOC_       /* ISO C behavior */
} _LIB_VERSION_TYPE;
/* Currently selected libm error-handling mode (legacy interface).  */
extern _LIB_VERSION_TYPE _LIB_VERSION;
/* Error record passed to matherr() by the legacy SVID math-error
   interface.  (Unrelated to C++ exceptions.)  */
struct exception
{
int type;      /* error type code */
char *name;    /* name of the math function that failed */
double arg1;   /* first argument of the failing call */
double arg2;   /* second argument of the failing call, if any */
double retval; /* value the failing function will return */
};
/* User-overridable handler invoked on libm errors in SVID mode.  */
extern int matherr (struct exception *__exc);
extern void __assert_fail (const char *__assertion, const char *__file,
unsigned int __line, const char *__function)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__noreturn__));
extern void __assert_perror_fail (int __errnum, const char *__file,
unsigned int __line, const char *__function)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__noreturn__));
extern void __assert (const char *__assertion, const char *__file, int __line)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__noreturn__));
/* Result type of subtracting two pointers.
   NOTE(review): this translation unit was preprocessed for a target
   where ptrdiff_t is plain int (i.e. 32-bit pointers) — confirm the
   intended build target.  */
typedef int ptrdiff_t;
/* A type whose alignment requirement is at least as strict as that of
   every scalar type (C11 <stddef.h>).  */
typedef struct {
long long __max_align_ll __attribute__((__aligned__(__alignof__(long long))));
long double __max_align_ld __attribute__((__aligned__(__alignof__(long double))));
} max_align_t;
extern void *memcpy (void *__restrict __dest, const void *__restrict __src,
size_t __n) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern void *memmove (void *__dest, const void *__src, size_t __n)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern void *memccpy (void *__restrict __dest, const void *__restrict __src,
int __c, size_t __n)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern void *memset (void *__s, int __c, size_t __n) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern int memcmp (const void *__s1, const void *__s2, size_t __n)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1, 2)));
extern void *memchr (const void *__s, int __c, size_t __n)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1)));
extern char *strcpy (char *__restrict __dest, const char *__restrict __src)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern char *strncpy (char *__restrict __dest,
const char *__restrict __src, size_t __n)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern char *strcat (char *__restrict __dest, const char *__restrict __src)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern char *strncat (char *__restrict __dest, const char *__restrict __src,
size_t __n) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int strcmp (const char *__s1, const char *__s2)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1, 2)));
extern int strncmp (const char *__s1, const char *__s2, size_t __n)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1, 2)));
extern int strcoll (const char *__s1, const char *__s2)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1, 2)));
extern size_t strxfrm (char *__restrict __dest,
const char *__restrict __src, size_t __n)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2)));
/* Locale object used by the *_l() string functions below: per-category
   locale data plus cached ctype lookup tables.  */
typedef struct __locale_struct
{
struct __locale_data *__locales[13]; /* presumably one slot per locale category — see glibc <locale.h> */
const unsigned short int *__ctype_b;   /* character classification table */
const int *__ctype_tolower;            /* to-lowercase mapping table */
const int *__ctype_toupper;            /* to-uppercase mapping table */
const char *__names[13];               /* names of the selected category locales */
} *__locale_t;
/* POSIX-standard name for the locale handle type.  */
typedef __locale_t locale_t;
extern int strcoll_l (const char *__s1, const char *__s2, __locale_t __l)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1, 2, 3)));
extern size_t strxfrm_l (char *__dest, const char *__src, size_t __n,
__locale_t __l) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2, 4)));
extern char *strdup (const char *__s)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__malloc__)) __attribute__ ((__nonnull__ (1)));
extern char *strndup (const char *__string, size_t __n)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__malloc__)) __attribute__ ((__nonnull__ (1)));
extern char *strchr (const char *__s, int __c)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1)));
extern char *strrchr (const char *__s, int __c)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1)));
extern size_t strcspn (const char *__s, const char *__reject)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1, 2)));
extern size_t strspn (const char *__s, const char *__accept)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1, 2)));
extern char *strpbrk (const char *__s, const char *__accept)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1, 2)));
extern char *strstr (const char *__haystack, const char *__needle)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1, 2)));
extern char *strtok (char *__restrict __s, const char *__restrict __delim)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2)));
extern char *__strtok_r (char *__restrict __s,
const char *__restrict __delim,
char **__restrict __save_ptr)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2, 3)));
extern char *strtok_r (char *__restrict __s, const char *__restrict __delim,
char **__restrict __save_ptr)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2, 3)));
extern size_t strlen (const char *__s)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1)));
extern size_t strnlen (const char *__string, size_t __maxlen)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1)));
extern char *strerror (int __errnum) __attribute__ ((__nothrow__ , __leaf__));
extern int strerror_r (int __errnum, char *__buf, size_t __buflen) __asm__ ("" "__xpg_strerror_r") __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2)));
extern char *strerror_l (int __errnum, __locale_t __l) __attribute__ ((__nothrow__ , __leaf__));
extern void __bzero (void *__s, size_t __n) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern void bcopy (const void *__src, void *__dest, size_t __n)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern void bzero (void *__s, size_t __n) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern int bcmp (const void *__s1, const void *__s2, size_t __n)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1, 2)));
extern char *index (const char *__s, int __c)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1)));
extern char *rindex (const char *__s, int __c)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1)));
extern int ffs (int __i) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int strcasecmp (const char *__s1, const char *__s2)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1, 2)));
extern int strncasecmp (const char *__s1, const char *__s2, size_t __n)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1, 2)));
extern char *strsep (char **__restrict __stringp,
const char *__restrict __delim)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern char *strsignal (int __sig) __attribute__ ((__nothrow__ , __leaf__));
extern char *__stpcpy (char *__restrict __dest, const char *__restrict __src)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern char *stpcpy (char *__restrict __dest, const char *__restrict __src)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern char *__stpncpy (char *__restrict __dest,
const char *__restrict __src, size_t __n)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern char *stpncpy (char *__restrict __dest,
const char *__restrict __src, size_t __n)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
/* Legacy timezone argument of gettimeofday()/settimeofday().  */
struct timezone
{
int tz_minuteswest; /* minutes west of Greenwich */
int tz_dsttime;     /* type of daylight-saving correction */
};
/* Convenience alias for the (restrict-qualified) gettimeofday() parameter.  */
typedef struct timezone *__restrict __timezone_ptr_t;
extern int gettimeofday (struct timeval *__restrict __tv,
__timezone_ptr_t __tz) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern int settimeofday (const struct timeval *__tv,
const struct timezone *__tz)
__attribute__ ((__nothrow__ , __leaf__));
extern int adjtime (const struct timeval *__delta,
struct timeval *__olddelta) __attribute__ ((__nothrow__ , __leaf__));
/* Which interval timer getitimer()/setitimer() operate on.  */
enum __itimer_which
{
ITIMER_REAL = 0,    /* real (wall-clock) time */
ITIMER_VIRTUAL = 1, /* process virtual time */
ITIMER_PROF = 2     /* profiling time */
};
/* Interval-timer setting: reload period plus time to next expiration.  */
struct itimerval
{
struct timeval it_interval; /* value the timer reloads with after each expiration */
struct timeval it_value;    /* time until the next expiration */
};
/* Plain-int parameter type used by the getitimer()/setitimer() prototypes.  */
typedef int __itimer_which_t;
extern int getitimer (__itimer_which_t __which,
struct itimerval *__value) __attribute__ ((__nothrow__ , __leaf__));
extern int setitimer (__itimer_which_t __which,
const struct itimerval *__restrict __new,
struct itimerval *__restrict __old) __attribute__ ((__nothrow__ , __leaf__));
extern int utimes (const char *__file, const struct timeval __tvp[2])
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern int lutimes (const char *__file, const struct timeval __tvp[2])
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern int futimes (int __fd, const struct timeval __tvp[2]) __attribute__ ((__nothrow__ , __leaf__));
/* Broken-down calendar time, as produced/consumed by gmtime(),
   localtime(), mktime(), and strftime() below.  */
struct tm
{
int tm_sec;   /* seconds */
int tm_min;   /* minutes */
int tm_hour;  /* hours */
int tm_mday;  /* day of the month */
int tm_mon;   /* month */
int tm_year;  /* year (glibc counts from 1900 — confirm at call sites) */
int tm_wday;  /* day of the week */
int tm_yday;  /* day of the year */
int tm_isdst; /* daylight-saving-time flag */
long int tm_gmtoff;   /* offset from UTC in seconds (GNU extension) */
const char *tm_zone;  /* timezone abbreviation (GNU extension) */
};
/* POSIX timer setting used by timer_settime()/timer_gettime():
   nanosecond-resolution analogue of struct itimerval.  */
struct itimerspec
{
struct timespec it_interval; /* period between expirations */
struct timespec it_value;    /* time until the (next) expiration */
};
/* Forward declaration only; the full definition lives in <signal.h>.  */
struct sigevent;
extern clock_t clock (void) __attribute__ ((__nothrow__ , __leaf__));
extern time_t time (time_t *__timer) __attribute__ ((__nothrow__ , __leaf__));
extern double difftime (time_t __time1, time_t __time0)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern time_t mktime (struct tm *__tp) __attribute__ ((__nothrow__ , __leaf__));
extern size_t strftime (char *__restrict __s, size_t __maxsize,
const char *__restrict __format,
const struct tm *__restrict __tp) __attribute__ ((__nothrow__ , __leaf__));
extern size_t strftime_l (char *__restrict __s, size_t __maxsize,
const char *__restrict __format,
const struct tm *__restrict __tp,
__locale_t __loc) __attribute__ ((__nothrow__ , __leaf__));
extern struct tm *gmtime (const time_t *__timer) __attribute__ ((__nothrow__ , __leaf__));
extern struct tm *localtime (const time_t *__timer) __attribute__ ((__nothrow__ , __leaf__));
extern struct tm *gmtime_r (const time_t *__restrict __timer,
struct tm *__restrict __tp) __attribute__ ((__nothrow__ , __leaf__));
extern struct tm *localtime_r (const time_t *__restrict __timer,
struct tm *__restrict __tp) __attribute__ ((__nothrow__ , __leaf__));
extern char *asctime (const struct tm *__tp) __attribute__ ((__nothrow__ , __leaf__));
extern char *ctime (const time_t *__timer) __attribute__ ((__nothrow__ , __leaf__));
extern char *asctime_r (const struct tm *__restrict __tp,
char *__restrict __buf) __attribute__ ((__nothrow__ , __leaf__));
extern char *ctime_r (const time_t *__restrict __timer,
char *__restrict __buf) __attribute__ ((__nothrow__ , __leaf__));
extern char *__tzname[2];
extern int __daylight;
extern long int __timezone;
extern char *tzname[2];
extern void tzset (void) __attribute__ ((__nothrow__ , __leaf__));
extern int daylight;
extern long int timezone;
extern int stime (const time_t *__when) __attribute__ ((__nothrow__ , __leaf__));
extern time_t timegm (struct tm *__tp) __attribute__ ((__nothrow__ , __leaf__));
extern time_t timelocal (struct tm *__tp) __attribute__ ((__nothrow__ , __leaf__));
extern int dysize (int __year) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int nanosleep (const struct timespec *__requested_time,
struct timespec *__remaining);
extern int clock_getres (clockid_t __clock_id, struct timespec *__res) __attribute__ ((__nothrow__ , __leaf__));
extern int clock_gettime (clockid_t __clock_id, struct timespec *__tp) __attribute__ ((__nothrow__ , __leaf__));
extern int clock_settime (clockid_t __clock_id, const struct timespec *__tp)
__attribute__ ((__nothrow__ , __leaf__));
extern int clock_nanosleep (clockid_t __clock_id, int __flags,
const struct timespec *__req,
struct timespec *__rem);
extern int clock_getcpuclockid (pid_t __pid, clockid_t *__clock_id) __attribute__ ((__nothrow__ , __leaf__));
extern int timer_create (clockid_t __clock_id,
struct sigevent *__restrict __evp,
timer_t *__restrict __timerid) __attribute__ ((__nothrow__ , __leaf__));
extern int timer_delete (timer_t __timerid) __attribute__ ((__nothrow__ , __leaf__));
extern int timer_settime (timer_t __timerid, int __flags,
const struct itimerspec *__restrict __value,
struct itimerspec *__restrict __ovalue) __attribute__ ((__nothrow__ , __leaf__));
extern int timer_gettime (timer_t __timerid, struct itimerspec *__value)
__attribute__ ((__nothrow__ , __leaf__));
extern int timer_getoverrun (timer_t __timerid) __attribute__ ((__nothrow__ , __leaf__));
extern int timespec_get (struct timespec *__ts, int __base)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
/* System identification strings filled in by uname().  */
struct utsname
{
char sysname[65];  /* operating system name */
char nodename[65]; /* network node (host) name */
char release[65];  /* operating system release */
char version[65];  /* operating system version */
char machine[65];  /* hardware type identifier */
char __domainname[65]; /* domain name (glibc-internal field) */
};
/* Fill *__name with identification of the running system.  */
extern int uname (struct utsname *__name) __attribute__ ((__nothrow__ , __leaf__));
/* Resource selectors for getrlimit()/setrlimit().  Double-underscore
   entries exist in the kernel ABI but are not exposed as public names
   in this compilation's feature set.  */
enum __rlimit_resource
{
RLIMIT_CPU = 0,    /* CPU time limit */
RLIMIT_FSIZE = 1,  /* file size limit */
RLIMIT_DATA = 2,   /* data segment size limit */
RLIMIT_STACK = 3,  /* stack size limit */
RLIMIT_CORE = 4,   /* core file size limit */
__RLIMIT_RSS = 5,
RLIMIT_NOFILE = 7, /* open file descriptor count limit */
__RLIMIT_OFILE = RLIMIT_NOFILE, /* historical BSD alias */
RLIMIT_AS = 9,     /* address-space (virtual memory) size limit */
__RLIMIT_NPROC = 6,
__RLIMIT_MEMLOCK = 8,
__RLIMIT_LOCKS = 10,
__RLIMIT_SIGPENDING = 11,
__RLIMIT_MSGQUEUE = 12,
__RLIMIT_NICE = 13,
__RLIMIT_RTPRIO = 14,
__RLIMIT_RTTIME = 15,
__RLIMIT_NLIMITS = 16,
__RLIM_NLIMITS = __RLIMIT_NLIMITS /* total number of limit kinds */
};
/* Value type for soft/hard limits.  */
typedef __rlim_t rlim_t;
/* Soft (current) and hard (ceiling) limits for one resource.  */
struct rlimit
{
rlim_t rlim_cur; /* current (soft) limit */
rlim_t rlim_max; /* maximum (hard) limit */
};
/* Whose usage getrusage() reports.  */
enum __rusage_who
{
RUSAGE_SELF = 0,     /* the calling process */
RUSAGE_CHILDREN = -1 /* terminated, waited-for children */
};
/* Resource-usage report.  Only the two timeval fields appear here;
   the remaining glibc fields are not visible in this preprocessed
   view — confirm against <sys/resource.h> before relying on layout.  */
struct rusage
{
struct timeval ru_utime; /* user CPU time used */
struct timeval ru_stime; /* system CPU time used */
};
/* Interpretation of the `who' argument to getpriority()/setpriority().  */
enum __priority_which
{
PRIO_PROCESS = 0, /* `who' is a process ID */
PRIO_PGRP = 1,    /* `who' is a process group ID */
PRIO_USER = 2     /* `who' is a user ID */
};
/* Plain-int parameter types used by the prototypes below.  */
typedef int __rlimit_resource_t;
typedef int __rusage_who_t;
typedef int __priority_which_t;
extern int getrlimit (__rlimit_resource_t __resource,
struct rlimit *__rlimits) __attribute__ ((__nothrow__ , __leaf__));
extern int setrlimit (__rlimit_resource_t __resource,
const struct rlimit *__rlimits) __attribute__ ((__nothrow__ , __leaf__));
extern int getrusage (__rusage_who_t __who, struct rusage *__usage) __attribute__ ((__nothrow__ , __leaf__));
extern int getpriority (__priority_which_t __which, id_t __who) __attribute__ ((__nothrow__ , __leaf__));
extern int setpriority (__priority_which_t __which, id_t __who, int __prio)
__attribute__ ((__nothrow__ , __leaf__));
extern char *dirname (char *__path) __attribute__ ((__nothrow__ , __leaf__));
extern char *__xpg_basename (char *__path) __attribute__ ((__nothrow__ , __leaf__));
typedef __useconds_t useconds_t;
typedef __intptr_t intptr_t;
typedef __socklen_t socklen_t;
extern int access (const char *__name, int __type) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern int faccessat (int __fd, const char *__file, int __type, int __flag)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2))) ;
extern __off_t lseek (int __fd, __off_t __offset, int __whence) __attribute__ ((__nothrow__ , __leaf__));
extern int close (int __fd);
extern ssize_t read (int __fd, void *__buf, size_t __nbytes) ;
extern ssize_t write (int __fd, const void *__buf, size_t __n) ;
extern ssize_t pread (int __fd, void *__buf, size_t __nbytes,
__off_t __offset) ;
extern ssize_t pwrite (int __fd, const void *__buf, size_t __n,
__off_t __offset) ;
extern int pipe (int __pipedes[2]) __attribute__ ((__nothrow__ , __leaf__)) ;
extern unsigned int alarm (unsigned int __seconds) __attribute__ ((__nothrow__ , __leaf__));
extern unsigned int sleep (unsigned int __seconds);
extern __useconds_t ualarm (__useconds_t __value, __useconds_t __interval)
__attribute__ ((__nothrow__ , __leaf__));
extern int usleep (__useconds_t __useconds);
extern int pause (void);
extern int chown (const char *__file, __uid_t __owner, __gid_t __group)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) ;
extern int fchown (int __fd, __uid_t __owner, __gid_t __group) __attribute__ ((__nothrow__ , __leaf__)) ;
extern int lchown (const char *__file, __uid_t __owner, __gid_t __group)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) ;
extern int fchownat (int __fd, const char *__file, __uid_t __owner,
__gid_t __group, int __flag)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2))) ;
extern int chdir (const char *__path) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) ;
extern int fchdir (int __fd) __attribute__ ((__nothrow__ , __leaf__)) ;
extern char *getcwd (char *__buf, size_t __size) __attribute__ ((__nothrow__ , __leaf__)) ;
extern char *getwd (char *__buf)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) __attribute__ ((__deprecated__)) ;
extern int dup (int __fd) __attribute__ ((__nothrow__ , __leaf__)) ;
extern int dup2 (int __fd, int __fd2) __attribute__ ((__nothrow__ , __leaf__));
extern char **__environ;
extern int execve (const char *__path, char *const __argv[],
char *const __envp[]) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int fexecve (int __fd, char *const __argv[], char *const __envp[])
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2)));
extern int execv (const char *__path, char *const __argv[])
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int execle (const char *__path, const char *__arg, ...)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int execl (const char *__path, const char *__arg, ...)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int execvp (const char *__file, char *const __argv[])
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int execlp (const char *__file, const char *__arg, ...)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
extern int nice (int __inc) __attribute__ ((__nothrow__ , __leaf__)) ;
extern void _exit (int __status) __attribute__ ((__noreturn__));
enum
{
_PC_LINK_MAX,
_PC_MAX_CANON,
_PC_MAX_INPUT,
_PC_NAME_MAX,
_PC_PATH_MAX,
_PC_PIPE_BUF,
_PC_CHOWN_RESTRICTED,
_PC_NO_TRUNC,
_PC_VDISABLE,
_PC_SYNC_IO,
_PC_ASYNC_IO,
_PC_PRIO_IO,
_PC_SOCK_MAXBUF,
_PC_FILESIZEBITS,
_PC_REC_INCR_XFER_SIZE,
_PC_REC_MAX_XFER_SIZE,
_PC_REC_MIN_XFER_SIZE,
_PC_REC_XFER_ALIGN,
_PC_ALLOC_SIZE_MIN,
_PC_SYMLINK_MAX,
_PC_2_SYMLINKS
};
enum
{
_SC_ARG_MAX,
_SC_CHILD_MAX,
_SC_CLK_TCK,
_SC_NGROUPS_MAX,
_SC_OPEN_MAX,
_SC_STREAM_MAX,
_SC_TZNAME_MAX,
_SC_JOB_CONTROL,
_SC_SAVED_IDS,
_SC_REALTIME_SIGNALS,
_SC_PRIORITY_SCHEDULING,
_SC_TIMERS,
_SC_ASYNCHRONOUS_IO,
_SC_PRIORITIZED_IO,
_SC_SYNCHRONIZED_IO,
_SC_FSYNC,
_SC_MAPPED_FILES,
_SC_MEMLOCK,
_SC_MEMLOCK_RANGE,
_SC_MEMORY_PROTECTION,
_SC_MESSAGE_PASSING,
_SC_SEMAPHORES,
_SC_SHARED_MEMORY_OBJECTS,
_SC_AIO_LISTIO_MAX,
_SC_AIO_MAX,
_SC_AIO_PRIO_DELTA_MAX,
_SC_DELAYTIMER_MAX,
_SC_MQ_OPEN_MAX,
_SC_MQ_PRIO_MAX,
_SC_VERSION,
_SC_PAGESIZE,
_SC_RTSIG_MAX,
_SC_SEM_NSEMS_MAX,
_SC_SEM_VALUE_MAX,
_SC_SIGQUEUE_MAX,
_SC_TIMER_MAX,
_SC_BC_BASE_MAX,
_SC_BC_DIM_MAX,
_SC_BC_SCALE_MAX,
_SC_BC_STRING_MAX,
_SC_COLL_WEIGHTS_MAX,
_SC_EQUIV_CLASS_MAX,
_SC_EXPR_NEST_MAX,
_SC_LINE_MAX,
_SC_RE_DUP_MAX,
_SC_CHARCLASS_NAME_MAX,
_SC_2_VERSION,
_SC_2_C_BIND,
_SC_2_C_DEV,
_SC_2_FORT_DEV,
_SC_2_FORT_RUN,
_SC_2_SW_DEV,
_SC_2_LOCALEDEF,
_SC_PII,
_SC_PII_XTI,
_SC_PII_SOCKET,
_SC_PII_INTERNET,
_SC_PII_OSI,
_SC_POLL,
_SC_SELECT,
_SC_UIO_MAXIOV,
_SC_IOV_MAX = _SC_UIO_MAXIOV,
_SC_PII_INTERNET_STREAM,
_SC_PII_INTERNET_DGRAM,
_SC_PII_OSI_COTS,
_SC_PII_OSI_CLTS,
_SC_PII_OSI_M,
_SC_T_IOV_MAX,
_SC_THREADS,
_SC_THREAD_SAFE_FUNCTIONS,
_SC_GETGR_R_SIZE_MAX,
_SC_GETPW_R_SIZE_MAX,
_SC_LOGIN_NAME_MAX,
_SC_TTY_NAME_MAX,
_SC_THREAD_DESTRUCTOR_ITERATIONS,
_SC_THREAD_KEYS_MAX,
_SC_THREAD_STACK_MIN,
_SC_THREAD_THREADS_MAX,
_SC_THREAD_ATTR_STACKADDR,
_SC_THREAD_ATTR_STACKSIZE,
_SC_THREAD_PRIORITY_SCHEDULING,
_SC_THREAD_PRIO_INHERIT,
_SC_THREAD_PRIO_PROTECT,
_SC_THREAD_PROCESS_SHARED,
_SC_NPROCESSORS_CONF,
_SC_NPROCESSORS_ONLN,
_SC_PHYS_PAGES,
_SC_AVPHYS_PAGES,
_SC_ATEXIT_MAX,
_SC_PASS_MAX,
_SC_XOPEN_VERSION,
_SC_XOPEN_XCU_VERSION,
_SC_XOPEN_UNIX,
_SC_XOPEN_CRYPT,
_SC_XOPEN_ENH_I18N,
_SC_XOPEN_SHM,
_SC_2_CHAR_TERM,
_SC_2_C_VERSION,
_SC_2_UPE,
_SC_XOPEN_XPG2,
_SC_XOPEN_XPG3,
_SC_XOPEN_XPG4,
_SC_CHAR_BIT,
_SC_CHAR_MAX,
_SC_CHAR_MIN,
_SC_INT_MAX,
_SC_INT_MIN,
_SC_LONG_BIT,
_SC_WORD_BIT,
_SC_MB_LEN_MAX,
_SC_NZERO,
_SC_SSIZE_MAX,
_SC_SCHAR_MAX,
_SC_SCHAR_MIN,
_SC_SHRT_MAX,
_SC_SHRT_MIN,
_SC_UCHAR_MAX,
_SC_UINT_MAX,
_SC_ULONG_MAX,
_SC_USHRT_MAX,
_SC_NL_ARGMAX,
_SC_NL_LANGMAX,
_SC_NL_MSGMAX,
_SC_NL_NMAX,
_SC_NL_SETMAX,
_SC_NL_TEXTMAX,
_SC_XBS5_ILP32_OFF32,
_SC_XBS5_ILP32_OFFBIG,
_SC_XBS5_LP64_OFF64,
_SC_XBS5_LPBIG_OFFBIG,
_SC_XOPEN_LEGACY,
_SC_XOPEN_REALTIME,
_SC_XOPEN_REALTIME_THREADS,
_SC_ADVISORY_INFO,
_SC_BARRIERS,
_SC_BASE,
_SC_C_LANG_SUPPORT,
_SC_C_LANG_SUPPORT_R,
_SC_CLOCK_SELECTION,
_SC_CPUTIME,
_SC_THREAD_CPUTIME,
_SC_DEVICE_IO,
_SC_DEVICE_SPECIFIC,
_SC_DEVICE_SPECIFIC_R,
_SC_FD_MGMT,
_SC_FIFO,
_SC_PIPE,
_SC_FILE_ATTRIBUTES,
_SC_FILE_LOCKING,
_SC_FILE_SYSTEM,
_SC_MONOTONIC_CLOCK,
_SC_MULTI_PROCESS,
_SC_SINGLE_PROCESS,
_SC_NETWORKING,
_SC_READER_WRITER_LOCKS,
_SC_SPIN_LOCKS,
_SC_REGEXP,
_SC_REGEX_VERSION,
_SC_SHELL,
_SC_SIGNALS,
_SC_SPAWN,
_SC_SPORADIC_SERVER,
_SC_THREAD_SPORADIC_SERVER,
_SC_SYSTEM_DATABASE,
_SC_SYSTEM_DATABASE_R,
_SC_TIMEOUTS,
_SC_TYPED_MEMORY_OBJECTS,
_SC_USER_GROUPS,
_SC_USER_GROUPS_R,
_SC_2_PBS,
_SC_2_PBS_ACCOUNTING,
_SC_2_PBS_LOCATE,
_SC_2_PBS_MESSAGE,
_SC_2_PBS_TRACK,
_SC_SYMLOOP_MAX,
_SC_STREAMS,
_SC_2_PBS_CHECKPOINT,
_SC_V6_ILP32_OFF32,
_SC_V6_ILP32_OFFBIG,
_SC_V6_LP64_OFF64,
_SC_V6_LPBIG_OFFBIG,
_SC_HOST_NAME_MAX,
_SC_TRACE,
_SC_TRACE_EVENT_FILTER,
_SC_TRACE_INHERIT,
_SC_TRACE_LOG,
_SC_LEVEL1_ICACHE_SIZE,
_SC_LEVEL1_ICACHE_ASSOC,
_SC_LEVEL1_ICACHE_LINESIZE,
_SC_LEVEL1_DCACHE_SIZE,
_SC_LEVEL1_DCACHE_ASSOC,
_SC_LEVEL1_DCACHE_LINESIZE,
_SC_LEVEL2_CACHE_SIZE,
_SC_LEVEL2_CACHE_ASSOC,
_SC_LEVEL2_CACHE_LINESIZE,
_SC_LEVEL3_CACHE_SIZE,
_SC_LEVEL3_CACHE_ASSOC,
_SC_LEVEL3_CACHE_LINESIZE,
_SC_LEVEL4_CACHE_SIZE,
_SC_LEVEL4_CACHE_ASSOC,
_SC_LEVEL4_CACHE_LINESIZE,
_SC_IPV6 = _SC_LEVEL1_ICACHE_SIZE + 50,
_SC_RAW_SOCKETS,
_SC_V7_ILP32_OFF32,
_SC_V7_ILP32_OFFBIG,
_SC_V7_LP64_OFF64,
_SC_V7_LPBIG_OFFBIG,
_SC_SS_REPL_MAX,
_SC_TRACE_EVENT_NAME_MAX,
_SC_TRACE_NAME_MAX,
_SC_TRACE_SYS_MAX,
_SC_TRACE_USER_EVENT_MAX,
_SC_XOPEN_STREAMS,
_SC_THREAD_ROBUST_PRIO_INHERIT,
_SC_THREAD_ROBUST_PRIO_PROTECT
};
enum
{
_CS_PATH,
_CS_V6_WIDTH_RESTRICTED_ENVS,
_CS_GNU_LIBC_VERSION,
_CS_GNU_LIBPTHREAD_VERSION,
_CS_V5_WIDTH_RESTRICTED_ENVS,
_CS_V7_WIDTH_RESTRICTED_ENVS,
_CS_LFS_CFLAGS = 1000,
_CS_LFS_LDFLAGS,
_CS_LFS_LIBS,
_CS_LFS_LINTFLAGS,
_CS_LFS64_CFLAGS,
_CS_LFS64_LDFLAGS,
_CS_LFS64_LIBS,
_CS_LFS64_LINTFLAGS,
_CS_XBS5_ILP32_OFF32_CFLAGS = 1100,
_CS_XBS5_ILP32_OFF32_LDFLAGS,
_CS_XBS5_ILP32_OFF32_LIBS,
_CS_XBS5_ILP32_OFF32_LINTFLAGS,
_CS_XBS5_ILP32_OFFBIG_CFLAGS,
_CS_XBS5_ILP32_OFFBIG_LDFLAGS,
_CS_XBS5_ILP32_OFFBIG_LIBS,
_CS_XBS5_ILP32_OFFBIG_LINTFLAGS,
_CS_XBS5_LP64_OFF64_CFLAGS,
_CS_XBS5_LP64_OFF64_LDFLAGS,
_CS_XBS5_LP64_OFF64_LIBS,
_CS_XBS5_LP64_OFF64_LINTFLAGS,
_CS_XBS5_LPBIG_OFFBIG_CFLAGS,
_CS_XBS5_LPBIG_OFFBIG_LDFLAGS,
_CS_XBS5_LPBIG_OFFBIG_LIBS,
_CS_XBS5_LPBIG_OFFBIG_LINTFLAGS,
_CS_POSIX_V6_ILP32_OFF32_CFLAGS,
_CS_POSIX_V6_ILP32_OFF32_LDFLAGS,
_CS_POSIX_V6_ILP32_OFF32_LIBS,
_CS_POSIX_V6_ILP32_OFF32_LINTFLAGS,
_CS_POSIX_V6_ILP32_OFFBIG_CFLAGS,
_CS_POSIX_V6_ILP32_OFFBIG_LDFLAGS,
_CS_POSIX_V6_ILP32_OFFBIG_LIBS,
_CS_POSIX_V6_ILP32_OFFBIG_LINTFLAGS,
_CS_POSIX_V6_LP64_OFF64_CFLAGS,
_CS_POSIX_V6_LP64_OFF64_LDFLAGS,
_CS_POSIX_V6_LP64_OFF64_LIBS,
_CS_POSIX_V6_LP64_OFF64_LINTFLAGS,
_CS_POSIX_V6_LPBIG_OFFBIG_CFLAGS,
_CS_POSIX_V6_LPBIG_OFFBIG_LDFLAGS,
_CS_POSIX_V6_LPBIG_OFFBIG_LIBS,
_CS_POSIX_V6_LPBIG_OFFBIG_LINTFLAGS,
_CS_POSIX_V7_ILP32_OFF32_CFLAGS,
_CS_POSIX_V7_ILP32_OFF32_LDFLAGS,
_CS_POSIX_V7_ILP32_OFF32_LIBS,
_CS_POSIX_V7_ILP32_OFF32_LINTFLAGS,
_CS_POSIX_V7_ILP32_OFFBIG_CFLAGS,
_CS_POSIX_V7_ILP32_OFFBIG_LDFLAGS,
_CS_POSIX_V7_ILP32_OFFBIG_LIBS,
_CS_POSIX_V7_ILP32_OFFBIG_LINTFLAGS,
_CS_POSIX_V7_LP64_OFF64_CFLAGS,
_CS_POSIX_V7_LP64_OFF64_LDFLAGS,
_CS_POSIX_V7_LP64_OFF64_LIBS,
_CS_POSIX_V7_LP64_OFF64_LINTFLAGS,
_CS_POSIX_V7_LPBIG_OFFBIG_CFLAGS,
_CS_POSIX_V7_LPBIG_OFFBIG_LDFLAGS,
_CS_POSIX_V7_LPBIG_OFFBIG_LIBS,
_CS_POSIX_V7_LPBIG_OFFBIG_LINTFLAGS,
_CS_V6_ENV,
_CS_V7_ENV
};
extern long int pathconf (const char *__path, int __name)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern long int fpathconf (int __fd, int __name) __attribute__ ((__nothrow__ , __leaf__));
extern long int sysconf (int __name) __attribute__ ((__nothrow__ , __leaf__));
extern size_t confstr (int __name, char *__buf, size_t __len) __attribute__ ((__nothrow__ , __leaf__));
extern __pid_t getpid (void) __attribute__ ((__nothrow__ , __leaf__));
extern __pid_t getppid (void) __attribute__ ((__nothrow__ , __leaf__));
extern __pid_t getpgrp (void) __attribute__ ((__nothrow__ , __leaf__));
extern __pid_t __getpgid (__pid_t __pid) __attribute__ ((__nothrow__ , __leaf__));
extern __pid_t getpgid (__pid_t __pid) __attribute__ ((__nothrow__ , __leaf__));
extern int setpgid (__pid_t __pid, __pid_t __pgid) __attribute__ ((__nothrow__ , __leaf__));
extern int setpgrp (void) __attribute__ ((__nothrow__ , __leaf__));
extern __pid_t setsid (void) __attribute__ ((__nothrow__ , __leaf__));
extern __pid_t getsid (__pid_t __pid) __attribute__ ((__nothrow__ , __leaf__));
extern __uid_t getuid (void) __attribute__ ((__nothrow__ , __leaf__));
extern __uid_t geteuid (void) __attribute__ ((__nothrow__ , __leaf__));
extern __gid_t getgid (void) __attribute__ ((__nothrow__ , __leaf__));
extern __gid_t getegid (void) __attribute__ ((__nothrow__ , __leaf__));
extern int getgroups (int __size, __gid_t __list[]) __attribute__ ((__nothrow__ , __leaf__)) ;
extern int setuid (__uid_t __uid) __attribute__ ((__nothrow__ , __leaf__)) ;
extern int setreuid (__uid_t __ruid, __uid_t __euid) __attribute__ ((__nothrow__ , __leaf__)) ;
extern int seteuid (__uid_t __uid) __attribute__ ((__nothrow__ , __leaf__)) ;
extern int setgid (__gid_t __gid) __attribute__ ((__nothrow__ , __leaf__)) ;
extern int setregid (__gid_t __rgid, __gid_t __egid) __attribute__ ((__nothrow__ , __leaf__)) ;
extern int setegid (__gid_t __gid) __attribute__ ((__nothrow__ , __leaf__)) ;
extern __pid_t fork (void) __attribute__ ((__nothrow__));
extern __pid_t vfork (void) __attribute__ ((__nothrow__ , __leaf__));
extern char *ttyname (int __fd) __attribute__ ((__nothrow__ , __leaf__));
extern int ttyname_r (int __fd, char *__buf, size_t __buflen)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2))) ;
extern int isatty (int __fd) __attribute__ ((__nothrow__ , __leaf__));
extern int ttyslot (void) __attribute__ ((__nothrow__ , __leaf__));
extern int link (const char *__from, const char *__to)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2))) ;
extern int linkat (int __fromfd, const char *__from, int __tofd,
const char *__to, int __flags)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2, 4))) ;
extern int symlink (const char *__from, const char *__to)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2))) ;
extern ssize_t readlink (const char *__restrict __path,
char *__restrict __buf, size_t __len)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2))) ;
extern int symlinkat (const char *__from, int __tofd,
const char *__to) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 3))) ;
extern ssize_t readlinkat (int __fd, const char *__restrict __path,
char *__restrict __buf, size_t __len)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2, 3))) ;
extern int unlink (const char *__name) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern int unlinkat (int __fd, const char *__name, int __flag)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (2)));
extern int rmdir (const char *__path) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern __pid_t tcgetpgrp (int __fd) __attribute__ ((__nothrow__ , __leaf__));
extern int tcsetpgrp (int __fd, __pid_t __pgrp_id) __attribute__ ((__nothrow__ , __leaf__));
extern char *getlogin (void);
extern int getlogin_r (char *__name, size_t __name_len) __attribute__ ((__nonnull__ (1)));
extern int setlogin (const char *__name) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern char *optarg;
extern int optind;
extern int opterr;
extern int optopt;
extern int getopt (int ___argc, char *const *___argv, const char *__shortopts)
__attribute__ ((__nothrow__ , __leaf__));
extern int gethostname (char *__name, size_t __len) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern int sethostname (const char *__name, size_t __len)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) ;
extern int sethostid (long int __id) __attribute__ ((__nothrow__ , __leaf__)) ;
extern int getdomainname (char *__name, size_t __len)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) ;
extern int setdomainname (const char *__name, size_t __len)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) ;
extern int vhangup (void) __attribute__ ((__nothrow__ , __leaf__));
extern int revoke (const char *__file) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) ;
extern int profil (unsigned short int *__sample_buffer, size_t __size,
size_t __offset, unsigned int __scale)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1)));
extern int acct (const char *__name) __attribute__ ((__nothrow__ , __leaf__));
extern char *getusershell (void) __attribute__ ((__nothrow__ , __leaf__));
extern void endusershell (void) __attribute__ ((__nothrow__ , __leaf__));
extern void setusershell (void) __attribute__ ((__nothrow__ , __leaf__));
extern int daemon (int __nochdir, int __noclose) __attribute__ ((__nothrow__ , __leaf__)) ;
extern int chroot (const char *__path) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) ;
extern char *getpass (const char *__prompt) __attribute__ ((__nonnull__ (1)));
extern int fsync (int __fd);
extern long int gethostid (void);
extern void sync (void) __attribute__ ((__nothrow__ , __leaf__));
extern int getpagesize (void) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__));
extern int getdtablesize (void) __attribute__ ((__nothrow__ , __leaf__));
extern int truncate (const char *__file, __off_t __length)
__attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) ;
extern int ftruncate (int __fd, __off_t __length) __attribute__ ((__nothrow__ , __leaf__)) ;
extern int brk (void *__addr) __attribute__ ((__nothrow__ , __leaf__)) ;
extern void *sbrk (intptr_t __delta) __attribute__ ((__nothrow__ , __leaf__));
extern long int syscall (long int __sysno, ...) __attribute__ ((__nothrow__ , __leaf__));
extern int lockf (int __fd, int __cmd, __off_t __len) ;
extern int fdatasync (int __fildes);
void bots_get_date(char *str);
void bots_get_architecture(char *str);
void bots_get_load_average(char *str);
void bots_print_results(void);
typedef struct
{
unsigned char _x[4]
__attribute__((__aligned__(4)));
} omp_lock_t;
typedef struct
{
unsigned char _x[8 + sizeof (void *)]
__attribute__((__aligned__(sizeof (void *))));
} omp_nest_lock_t;
typedef enum omp_sched_t
{
omp_sched_static = 1,
omp_sched_dynamic = 2,
omp_sched_guided = 3,
omp_sched_auto = 4
} omp_sched_t;
typedef enum omp_proc_bind_t
{
omp_proc_bind_false = 0,
omp_proc_bind_true = 1,
omp_proc_bind_master = 2,
omp_proc_bind_close = 3,
omp_proc_bind_spread = 4
} omp_proc_bind_t;
typedef enum omp_lock_hint_t
{
omp_lock_hint_none = 0,
omp_lock_hint_uncontended = 1,
omp_lock_hint_contended = 2,
omp_lock_hint_nonspeculative = 4,
omp_lock_hint_speculative = 8
} omp_lock_hint_t;
extern void omp_set_num_threads (int) __attribute__((__nothrow__));
extern int omp_get_num_threads (void) __attribute__((__nothrow__));
extern int omp_get_max_threads (void) __attribute__((__nothrow__));
extern int omp_get_thread_num (void) __attribute__((__nothrow__));
extern int omp_get_num_procs (void) __attribute__((__nothrow__));
extern int omp_in_parallel (void) __attribute__((__nothrow__));
extern void omp_set_dynamic (int) __attribute__((__nothrow__));
extern int omp_get_dynamic (void) __attribute__((__nothrow__));
extern void omp_set_nested (int) __attribute__((__nothrow__));
extern int omp_get_nested (void) __attribute__((__nothrow__));
extern void omp_init_lock (omp_lock_t *) __attribute__((__nothrow__));
extern void omp_init_lock_with_hint (omp_lock_t *, omp_lock_hint_t)
__attribute__((__nothrow__));
extern void omp_destroy_lock (omp_lock_t *) __attribute__((__nothrow__));
extern void omp_set_lock (omp_lock_t *) __attribute__((__nothrow__));
extern void omp_unset_lock (omp_lock_t *) __attribute__((__nothrow__));
extern int omp_test_lock (omp_lock_t *) __attribute__((__nothrow__));
extern void omp_init_nest_lock (omp_nest_lock_t *) __attribute__((__nothrow__));
extern void omp_init_nest_lock_with_hint (omp_lock_t *, omp_lock_hint_t)
__attribute__((__nothrow__));
extern void omp_destroy_nest_lock (omp_nest_lock_t *) __attribute__((__nothrow__));
extern void omp_set_nest_lock (omp_nest_lock_t *) __attribute__((__nothrow__));
extern void omp_unset_nest_lock (omp_nest_lock_t *) __attribute__((__nothrow__));
extern int omp_test_nest_lock (omp_nest_lock_t *) __attribute__((__nothrow__));
extern double omp_get_wtime (void) __attribute__((__nothrow__));
extern double omp_get_wtick (void) __attribute__((__nothrow__));
extern void omp_set_schedule (omp_sched_t, int) __attribute__((__nothrow__));
extern void omp_get_schedule (omp_sched_t *, int *) __attribute__((__nothrow__));
extern int omp_get_thread_limit (void) __attribute__((__nothrow__));
extern void omp_set_max_active_levels (int) __attribute__((__nothrow__));
extern int omp_get_max_active_levels (void) __attribute__((__nothrow__));
extern int omp_get_level (void) __attribute__((__nothrow__));
extern int omp_get_ancestor_thread_num (int) __attribute__((__nothrow__));
extern int omp_get_team_size (int) __attribute__((__nothrow__));
extern int omp_get_active_level (void) __attribute__((__nothrow__));
extern int omp_in_final (void) __attribute__((__nothrow__));
extern int omp_get_cancellation (void) __attribute__((__nothrow__));
extern omp_proc_bind_t omp_get_proc_bind (void) __attribute__((__nothrow__));
extern int omp_get_num_places (void) __attribute__((__nothrow__));
extern int omp_get_place_num_procs (int) __attribute__((__nothrow__));
extern void omp_get_place_proc_ids (int, int *) __attribute__((__nothrow__));
extern int omp_get_place_num (void) __attribute__((__nothrow__));
extern int omp_get_partition_num_places (void) __attribute__((__nothrow__));
extern void omp_get_partition_place_nums (int *) __attribute__((__nothrow__));
extern void omp_set_default_device (int) __attribute__((__nothrow__));
extern int omp_get_default_device (void) __attribute__((__nothrow__));
extern int omp_get_num_devices (void) __attribute__((__nothrow__));
extern int omp_get_num_teams (void) __attribute__((__nothrow__));
extern int omp_get_team_num (void) __attribute__((__nothrow__));
extern int omp_is_initial_device (void) __attribute__((__nothrow__));
extern int omp_get_initial_device (void) __attribute__((__nothrow__));
extern int omp_get_max_task_priority (void) __attribute__((__nothrow__));
extern void *omp_target_alloc (unsigned int, int) __attribute__((__nothrow__));
extern void omp_target_free (void *, int) __attribute__((__nothrow__));
extern int omp_target_is_present (void *, int) __attribute__((__nothrow__));
extern int omp_target_memcpy (void *, void *, unsigned int, unsigned int,
unsigned int, int, int) __attribute__((__nothrow__));
extern int omp_target_memcpy_rect (void *, void *, unsigned int, int,
const unsigned int *,
const unsigned int *,
const unsigned int *,
const unsigned int *,
const unsigned int *, int, int)
__attribute__((__nothrow__));
extern int omp_target_associate_ptr (void *, void *, unsigned int,
unsigned int, int) __attribute__((__nothrow__));
extern int omp_target_disassociate_ptr (void *, int) __attribute__((__nothrow__));
void bots_print_usage(void);
void bots_print_usage_option(char opt, int type, char* description, char *val, int subc, char **subv);
void bots_initialize();
void bots_finalize();
void bots_sequential_ini();
long bots_sequential();
void bots_sequential_fini();
int bots_check_result();
void bots_print_usage_specific();
void bots_get_params_specific(int argc, char **argv);
void bots_set_info();
void bots_get_params_common(int argc, char **argv);
void bots_get_params(int argc, char **argv);
void ctrl_c_handler(int s);
void energymonitor__initialize();
void energymonitor__setfilename(char *profFileName);
void energymonitor__saveascsv(char *profFileName);
void energymonitor__saveastextfile(char* profFileName);
void energymonitor__init(int cores,float sleeptime);
void energymonitor__setsleeptime(float time);
void energymonitor__startprofiling();
void energymonitor__stopprofiling();
void energymonitor__pauseprofiling();
void energymonotor__upauseprofiling();
void energymonitor__trackpoweronly();
void energymonitor__trackvoltage();
void energymonitor__trackeverything();
int energymonitor__runfunction();
void energymonitor__settrackingcores(int cores);
void* energymonitor__startprofilingthread(void* threadid);
struct Results {
long hosps_number;
long hosps_personnel;
long total_patients;
long total_in_village;
long total_waiting;
long total_assess;
long total_inside;
long total_time;
long total_hosps_v;
};
extern int sim_level;
struct Patient {
int id;
int32_t seed;
int time;
int time_left;
int hosps_visited;
struct Village *home_village;
struct Patient *back;
struct Patient *forward;
};
struct Hosp {
int personnel;
int free_personnel;
struct Patient *waiting;
struct Patient *assess;
struct Patient *inside;
struct Patient *realloc;
omp_lock_t realloc_lock;
};
struct Village {
int id;
struct Village *back;
struct Village *next;
struct Village *forward;
struct Patient *population;
struct Hosp hosp;
int level;
int32_t seed;
};
float my_rand(int32_t *seed);
struct Patient *generate_patient(struct Village *village);
void put_in_hosp(struct Hosp *hosp, struct Patient *patient);
void addList(struct Patient **list, struct Patient *patient);
void removeList(struct Patient **list, struct Patient *patient);
void check_patients_inside(struct Village *village);
void check_patients_waiting(struct Village *village);
void check_patients_realloc(struct Village *village);
void check_patients_assess_par(struct Village *village);
float get_num_people(struct Village *village);
float get_total_time(struct Village *village);
float get_total_hosps(struct Village *village);
struct Results get_results(struct Village *village);
void read_input_data(char *filename);
void allocate_village( struct Village **capital, struct Village *back, struct Village *next, int level, int32_t vid);
void sim_village_main_par(struct Village *top);
void sim_village_par(struct Village *village);
int check_village(struct Village *top);
void check_patients_assess(struct Village *village);
void check_patients_population(struct Village *village);
void sim_village(struct Village *village);
void my_print(struct Village *village);
extern int bots_sequential_flag;
extern int bots_benchmark_flag;
extern int bots_check_flag;
extern int bots_result;
extern int bots_output_format;
extern int bots_print_header;
extern char bots_name[];
extern char bots_parameters[];
extern char bots_model[];
extern char bots_resources[];
extern char bots_exec_date[];
extern char bots_exec_message[];
extern char bots_comp_date[];
extern char bots_comp_message[];
extern char bots_cc[];
extern char bots_cflags[];
extern char bots_ld[];
extern char bots_ldflags[];
extern double bots_time_program;
extern double bots_time_sequential;
extern unsigned long long bots_number_of_tasks;
extern char bots_cutoff[];
extern int bots_cutoff_value;
extern int bots_app_cutoff_value;
extern int bots_app_cutoff_value_1;
extern int bots_app_cutoff_value_2;
extern int bots_arg_size;
extern int bots_arg_size_1;
extern int bots_arg_size_2;
long bots_usecs();
void bots_error(int error, char *message);
void bots_warning(int warning, char *message);
typedef enum { BOTS_VERBOSE_NONE=0,
BOTS_VERBOSE_DEFAULT,
BOTS_VERBOSE_DEBUG } bots_verbose_mode_t;
extern bots_verbose_mode_t bots_verbose_mode;
/* Simulation parameters, read from the input file by read_input_data(). */
int sim_level;               /* number of levels in the village hierarchy */
int sim_cities;              /* child villages (cities) per level */
int sim_population_ratio;    /* patients per hospital staff member */
int sim_time;                /* total simulation steps */
int sim_assess_time;         /* steps a patient spends in assessment */
int sim_convalescence_time;  /* steps a patient spends convalescing */
int32_t sim_seed;            /* base seed for the per-village PRNG streams */
float sim_get_sick_p;        /* probability a villager falls sick per step */
float sim_convalescence_p;   /* probability an assessed patient must convalesce */
float sim_realloc_p;         /* probability a patient is sent up to the parent hospital */
int sim_pid = 0;             /* monotonically increasing patient id counter */
/* Expected results, read from the input file; used for verification. */
int res_population;
int res_hospitals;
int res_personnel;
int res_checkin;
int res_village;
int res_waiting;
int res_assess;
int res_inside;
float res_avg_stay;
/*
 * Park-Miller style linear congruential PRNG (cf. Numerical Recipes' ran0):
 * Schrage's trick (k = idum/IQ; idum = IA*(idum - k*IQ) - IR*k) evaluates
 * (IA * idum) mod IM without overflow, with IA=16807, IQ=127773, IR=2836,
 * IM=2147483647 and an XOR mask of 123459876 applied before and after.
 * Advances *seed and returns a float in [0, 1).
 */
float my_rand(int32_t *seed)
{
    int32_t k;
    int32_t idum = *seed;

    idum ^= 123459876;
    k = idum / 127773;
    idum = 16807 * (idum - k * 127773) - 2836 * k;
    idum ^= 123459876;
    if (idum < 0) idum += 2147483647;
    /* BUG FIX: the original wrote `*seed = idum * 2147483647;`, which
       overflows int32_t -- undefined behavior in C.  Perform the multiply
       in unsigned arithmetic, which is defined to wrap modulo 2^32 and
       reproduces the two's-complement result the benchmark relies on. */
    *seed = (int32_t) ((uint32_t) idum * 2147483647u);
    return (float) (1.0 / 2147483647) * idum;
}
/*
 * Append 'patient' at the tail of the doubly linked list headed by *list.
 * The patient's back/forward links are (re)written unconditionally.
 */
void addList(struct Patient **list, struct Patient *patient)
{
    struct Patient *tail = *list;

    if (tail == ((void *)0)) {
        /* Empty list: patient becomes the sole element and new head. */
        patient->back = ((void *)0);
        patient->forward = ((void *)0);
        *list = patient;
        return;
    }
    /* Walk to the last element, then link the patient after it. */
    while (tail->forward != ((void *)0))
        tail = tail->forward;
    tail->forward = patient;
    patient->back = tail;
    patient->forward = ((void *)0);
}
/*
 * Unlink 'patient' from the doubly linked list headed by *list.
 * The patient's own back/forward pointers are left untouched.
 */
void removeList(struct Patient **list, struct Patient *patient)
{
    struct Patient *prev = patient->back;
    struct Patient *next = patient->forward;

    if (prev == ((void *)0))
        *list = next;            /* patient was the head */
    else
        prev->forward = next;
    if (next != ((void *)0))
        next->back = prev;
}
/*
 * Recursively build the village tree rooted at *capital.
 *
 * A village at 'level' gets 2^level hospital staff and
 * personnel * sim_population_ratio patients, each seeded from the
 * village's PRNG stream.  sim_cities children are then allocated one
 * level down, linked through their 'next' pointers; the first child
 * created last in the loop ends up as (*capital)->forward.
 * At level 0, *capital is set to NULL (leaf sentinel).
 */
void allocate_village( struct Village **capital, struct Village *back,
         struct Village *next, int level, int32_t vid)
{
    int i, population, personnel;
    struct Village *current, *inext;
    struct Patient *patient;

    if (level == 0) *capital = ((void *)0);
    else
    {
        personnel = (int) pow(2, level);
        population = personnel * sim_population_ratio;
        *capital = (struct Village *) malloc(sizeof(struct Village));
        (*capital)->back = back;
        (*capital)->next = next;
        (*capital)->level = level;
        (*capital)->id = vid;
        (*capital)->seed = vid * (127773 + sim_seed);
        (*capital)->population = ((void *)0);
        for (i = 0; i < population; i++)
        {
            patient = (struct Patient *)malloc(sizeof(struct Patient));
            patient->id = sim_pid++;
            /* Each patient starts from the village stream, which is then
               advanced so the next patient gets a different seed. */
            patient->seed = (*capital)->seed;
            my_rand(&((*capital)->seed));
            patient->hosps_visited = 0;
            patient->time = 0;
            patient->time_left = 0;
            patient->home_village = *capital;
            addList(&((*capital)->population), patient);
        }
        (*capital)->hosp.personnel = personnel;
        (*capital)->hosp.free_personnel = personnel;
        (*capital)->hosp.assess = ((void *)0);
        (*capital)->hosp.waiting = ((void *)0);
        (*capital)->hosp.inside = ((void *)0);
        (*capital)->hosp.realloc = ((void *)0);
        omp_init_lock(&(*capital)->hosp.realloc_lock);
        /* BUG FIX: 'current' was left uninitialized; if sim_cities <= 0 the
           loop below never runs and (*capital)->forward was assigned an
           indeterminate pointer (undefined behavior).  Start it at NULL. */
        current = ((void *)0);
        inext = ((void *)0);
        for (i = sim_cities; i > 0; i--)
        {
            int32_t city = (int32_t) sim_cities;
            allocate_village(&current, *capital, inext, level-1, (vid * city) + (int32_t) i);
            inext = current;
        }
        (*capital)->forward = current;
    }
}
/*
 * Recursively accumulate simulation statistics over the subtree rooted at
 * 'village': hospital/personnel counts plus per-patient totals gathered
 * from the population, waiting, assess and inside lists.
 * Returns an all-zero struct for a NULL village.
 */
struct Results get_results(struct Village *village)
{
    struct Village *vlist;
    struct Patient *p;
    struct Results t_res, p_res;

    /* All struct Results fields are 'long'.  The original initialized and
       accumulated them via double/float literals and (float) casts, which
       can silently lose precision once totals exceed float's 24-bit
       mantissa; use integer arithmetic throughout instead. */
    t_res.hosps_number = 0;
    t_res.hosps_personnel = 0;
    t_res.total_patients = 0;
    t_res.total_in_village = 0;
    t_res.total_waiting = 0;
    t_res.total_assess = 0;
    t_res.total_inside = 0;
    t_res.total_hosps_v = 0;
    t_res.total_time = 0;

    if (village == ((void *)0)) return t_res;

    /* Fold in the results of every child village. */
    vlist = village->forward;
    while (vlist)
    {
        p_res = get_results(vlist);
        t_res.hosps_number += p_res.hosps_number;
        t_res.hosps_personnel += p_res.hosps_personnel;
        t_res.total_patients += p_res.total_patients;
        t_res.total_in_village += p_res.total_in_village;
        t_res.total_waiting += p_res.total_waiting;
        t_res.total_assess += p_res.total_assess;
        t_res.total_inside += p_res.total_inside;
        t_res.total_hosps_v += p_res.total_hosps_v;
        t_res.total_time += p_res.total_time;
        vlist = vlist->next;
    }

    /* This village's own hospital. */
    t_res.hosps_number += 1;
    t_res.hosps_personnel += village->hosp.personnel;

    /* Healthy villagers. */
    p = village->population;
    while (p != ((void *)0))
    {
        t_res.total_patients += 1;
        t_res.total_in_village += 1;
        t_res.total_hosps_v += p->hosps_visited;
        t_res.total_time += p->time;
        p = p->forward;
    }
    /* Patients waiting for a free staff member. */
    p = village->hosp.waiting;
    while (p != ((void *)0))
    {
        t_res.total_patients += 1;
        t_res.total_waiting += 1;
        t_res.total_hosps_v += p->hosps_visited;
        t_res.total_time += p->time;
        p = p->forward;
    }
    /* Patients under assessment. */
    p = village->hosp.assess;
    while (p != ((void *)0))
    {
        t_res.total_patients += 1;
        t_res.total_assess += 1;
        t_res.total_hosps_v += p->hosps_visited;
        t_res.total_time += p->time;
        p = p->forward;
    }
    /* Patients convalescing inside the hospital. */
    p = village->hosp.inside;
    while (p != ((void *)0))
    {
        t_res.total_patients += 1;
        t_res.total_inside += 1;
        t_res.total_hosps_v += p->hosps_visited;
        t_res.total_time += p->time;
        p = p->forward;
    }
    return t_res;
}
/*
 * Advance convalescence by one step for every patient inside the hospital.
 * Patients whose remaining time reaches zero are discharged back to the
 * village population and their staff member is freed.
 */
void check_patients_inside(struct Village *village)
{
    struct Patient *p = village->hosp.inside;
    struct Patient *next;

    while (p != ((void *)0))
    {
        /* Save the successor first: p may be unlinked below. */
        next = p->forward;
        if (--p->time_left == 0)
        {
            village->hosp.free_personnel++;
            removeList(&(village->hosp.inside), p);
            addList(&(village->population), p);
        }
        p = next;
    }
}
/*
 * Advance assessment by one step for every patient in the assess list.
 * When a patient's assessment ends, one of three things happens (driven by
 * the patient's own PRNG stream):
 *   - convalesce here: move to the 'inside' list for sim_convalescence_time
 *     steps (always, at the top level village),
 *   - be reallocated: release the staff member and hand the patient to the
 *     PARENT village's realloc list (taken under the parent's lock),
 *   - or recover: release the staff member and return to the population.
 */
void check_patients_assess_par(struct Village *village)
{
    struct Patient *list = village->hosp.assess;
    float rand;
    struct Patient *p;

    while (list != ((void *)0))
    {
        p = list;
        list = list->forward;   /* saved before p can be unlinked */
        p->time_left--;
        if (p->time_left == 0)
        {
            rand = my_rand(&(p->seed));
            if (rand < sim_convalescence_p)
            {
                rand = my_rand(&(p->seed));
                if (rand > sim_realloc_p || village->level == sim_level)
                {
                    removeList(&(village->hosp.assess), p);
                    addList(&(village->hosp.inside), p);
                    p->time_left = sim_convalescence_time;
                    p->time += p->time_left;
                }
                else
                {
                    village->hosp.free_personnel++;
                    removeList(&(village->hosp.assess), p);
                    struct Village* backvill = village->back;
                    /* BUG FIX: the patient is appended to the PARENT
                       village's realloc list, so the PARENT's lock must be
                       held.  The original locked this village's own
                       realloc_lock, which does not serialize sibling
                       villages concurrently appending to the same parent
                       list -- a data race under the OpenMP tasking in
                       sim_village_par. */
                    omp_set_lock(&(backvill->hosp.realloc_lock));
                    addList(&(backvill->hosp.realloc), p);
                    omp_unset_lock(&(backvill->hosp.realloc_lock));
                }
            }
            else
            {
                village->hosp.free_personnel++;
                removeList(&(village->hosp.assess), p);
                addList(&(village->population), p);
            }
        }
    }
}
/*
 * Try to move waiting patients into assessment.  Each patient that gets a
 * free staff member starts a sim_assess_time assessment; everyone still
 * waiting just accumulates one more step of elapsed time.
 */
void check_patients_waiting(struct Village *village)
{
    struct Patient *p = village->hosp.waiting;
    struct Patient *next;

    while (p != ((void *)0))
    {
        /* Save the successor first: p may be unlinked below. */
        next = p->forward;
        if (village->hosp.free_personnel > 0)
        {
            village->hosp.free_personnel--;
            p->time_left = sim_assess_time;
            p->time += p->time_left;
            removeList(&(village->hosp.waiting), p);
            addList(&(village->hosp.assess), p);
        }
        else
        {
            p->time++;
        }
        p = next;
    }
}
/*
 * Drain this village's realloc list, admitting patients to the hospital in
 * ascending patient-id order (deterministic regardless of the order in
 * which children appended them).  Quadratic by design: repeatedly select
 * the minimum-id patient and hand it to put_in_hosp.
 */
void check_patients_realloc(struct Village *village)
{
    while (village->hosp.realloc != ((void *)0))
    {
        struct Patient *lowest = village->hosp.realloc;
        struct Patient *scan;

        /* Strict '<' keeps the earliest element on id ties. */
        for (scan = lowest->forward; scan != ((void *)0); scan = scan->forward)
            if (scan->id < lowest->id)
                lowest = scan;

        removeList(&(village->hosp.realloc), lowest);
        put_in_hosp(&(village->hosp), lowest);
    }
}
/*
 * Roll each villager's PRNG once: with probability sim_get_sick_p the
 * villager falls sick, leaves the population, and checks into the hospital.
 */
void check_patients_population(struct Village *village)
{
    struct Patient *p = village->population;
    struct Patient *next;
    float draw;

    while (p != ((void *)0))
    {
        /* Save the successor first: p may be unlinked below. */
        next = p->forward;
        draw = my_rand(&(p->seed));
        if (draw < sim_get_sick_p)
        {
            removeList(&(village->population), p);
            put_in_hosp(&(village->hosp), p);
        }
        p = next;
    }
}
/*
 * Check a patient into a hospital: count the visit, then either start an
 * assessment (consuming a free staff member and charging sim_assess_time)
 * or, if no staff is free, queue the patient in the waiting list.
 */
void put_in_hosp(struct Hosp *hosp, struct Patient *patient)
{
    patient->hosps_visited++;

    if (hosp->free_personnel <= 0)
    {
        /* No staff available: wait. */
        addList(&(hosp->waiting), patient);
        return;
    }

    hosp->free_personnel--;
    patient->time_left = sim_assess_time;
    patient->time += patient->time_left;
    addList(&(hosp->assess), patient);
}
/*
 * One simulation step for 'village' and, recursively, all villages below it.
 * Each child subtree is processed as an OpenMP task (only spawned while the
 * depth below the root is under bots_cutoff_value); this village's own
 * inside/assess/waiting phases overlap with the child tasks.  The taskwait
 * fences before check_patients_realloc, which consumes the realloc list that
 * child tasks append to (see check_patients_assess_par).
 * NOTE(review): statement order around the pragmas is load-bearing; code is
 * intentionally left byte-identical.
 */
void sim_village_par(struct Village *village)
{
struct Village *vlist;
if (village == ((void *)0)) return;
vlist = village->forward;
/* Spawn one task per child village. */
while(vlist)
{
#pragma omp task if((sim_level - village->level) < bots_cutoff_value)
sim_village_par(vlist);
vlist = vlist->next;
}
/* Local phases; may run concurrently with the child tasks above. */
check_patients_inside(village);
check_patients_assess_par(village);
check_patients_waiting(village);
/* All child tasks must finish before draining the realloc list they feed. */
#pragma omp taskwait
check_patients_realloc(village);
check_patients_population(village);
}
/*
 * Recursively walk the village tree (children first, then this village's
 * population list).  The bare ';' statements below are the residue of
 * debug-print macros that expanded to nothing in this build configuration,
 * so the traversal currently has no observable effect.
 */
void my_print(struct Village *village)
{
struct Village *vlist;
struct Patient *plist;
if (village == ((void *)0)) return;
vlist = village->forward;
while(vlist) {
my_print(vlist);
vlist = vlist->next;
}
plist = village->population;
while (plist != ((void *)0)) {
/* per-patient debug output compiled out */
;
plist = plist->forward;
}
/* per-village debug output compiled out */
;
}
/*
 * Load the global simulation parameters (sim_*) and the expected
 * verification counters (res_*) from the given text file, echoing the
 * configuration when verbose mode is enabled.
 * Exits with status -1 if the file cannot be opened or does not contain
 * all 19 expected fields.
 */
void read_input_data(char *filename)
{
FILE *fin;
int res;
if ((fin = fopen(filename, "r")) == ((void *)0)) {
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Could not open sequence file (%s)\n" , filename); } };
exit (-1);
}
res = fscanf(fin,"%d %d %d %d %d %d %ld %f %f %f %d %d %d %d %d %d %d %d %f",
&sim_level,
&sim_cities,
&sim_population_ratio,
&sim_time,
&sim_assess_time,
&sim_convalescence_time,
&sim_seed,
&sim_get_sick_p,
&sim_convalescence_p,
&sim_realloc_p,
&res_population,
&res_hospitals,
&res_personnel,
&res_checkin,
&res_village,
&res_waiting,
&res_assess,
&res_inside,
&res_avg_stay
);
/* fscanf returns the number of fields successfully converted. The original
   check (res == -1) only caught immediate EOF, silently accepting truncated
   or garbled files and leaving the remaining globals uninitialized; require
   all 19 fields instead. */
if ( res != 19 ) {
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Bogus input file (%s)\n" , filename); } };
exit(-1);
}
fclose(fin);
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "\n"); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Number of levels    = %d\n" , (int) sim_level); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Cities per level    = %d\n" , (int) sim_cities); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Population ratio    = %d\n" , (int) sim_population_ratio); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Simulation time     = %d\n" , (int) sim_time); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Assess time         = %d\n" , (int) sim_assess_time); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Convalescence time  = %d\n" , (int) sim_convalescence_time); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Initial seed        = %d\n" , (int) sim_seed); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Get sick prob.      = %f\n" , (float) sim_get_sick_p); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Convalescence prob. = %f\n" , (float) sim_convalescence_p); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Realloc prob.       = %f\n" , (float) sim_realloc_p); } };
}
/* Compare the simulation outcome against the reference counters read from
 * the input file. Returns the BOTS result code: 1 (successful) when every
 * counter matches, 2 (UNSUCCESSFUL) if any differs. The average-stay line
 * is printed for information only and does not affect the verdict. */
int check_village(struct Village *top)
{
struct Results result = get_results(top);
int answer = 1;
/* any single mismatch marks the whole run as failed */
if (res_population != result.total_patients) answer = 2;
if (res_hospitals != result.hosps_number) answer = 2;
if (res_personnel != result.hosps_personnel) answer = 2;
if (res_checkin != result.total_hosps_v) answer = 2;
if (res_village != result.total_in_village) answer = 2;
if (res_waiting != result.total_waiting) answer = 2;
if (res_assess != result.total_assess) answer = 2;
if (res_inside != result.total_inside) answer = 2;
/* verbose report: expected vs. observed counters */
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "\n"); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Sim. Variables      = expect / result\n"); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Total population    = %6d / %6d people\n" , (int) res_population, (int) result.total_patients); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Hospitals           = %6d / %6d people\n" , (int) res_hospitals, (int) result.hosps_number); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Personnel           = %6d / %6d people\n" , (int) res_personnel, (int) result.hosps_personnel); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Check-in's          = %6d / %6d people\n" , (int) res_checkin, (int) result.total_hosps_v); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "In Villages         = %6d / %6d people\n" , (int) res_village, (int) result.total_in_village); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "In Waiting List     = %6d / %6d people\n" , (int) res_waiting, (int) result.total_waiting); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "In Assess           = %6d / %6d people\n" , (int) res_assess, (int) result.total_assess); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Inside Hospital     = %6d / %6d people\n" , (int) res_inside, (int) result.total_inside); } };
{ if ( bots_verbose_mode >= BOTS_VERBOSE_DEFAULT ) { fprintf(stdout, "Average Stay        = %6f / %6f u/time\n" , (float) res_avg_stay,(float) result.total_time/result.total_patients); } };
my_print(top);
return answer;
}
/* Top-level parallel driver: open a parallel region, let a single thread
 * create one root task, and inside it run sim_time sequential simulation
 * steps. Each step spawns its own subtree of tasks via sim_village_par(),
 * which are executed by the team created here. */
void sim_village_main_par(struct Village *top)
{
long i;
#pragma omp parallel
#pragma omp single
#pragma omp task
for (i = 0; i < sim_time; i++) sim_village_par(top);
}
/* --- BOTS common runtime state --------------------------------------- */
/* execution-mode flags set by bots_get_params_common() */
int bots_sequential_flag = 0; /* nonzero when a sequential reference run was timed */
int bots_check_flag = 0; /* -c: verify the result after the run */
bots_verbose_mode_t bots_verbose_mode = BOTS_VERBOSE_DEFAULT; /* -v level */
int bots_result = 3; /* verification outcome; 3 = "Not requested" */
int bots_output_format = 1; /* -o: report layout (list/row, detailed/abridged) */
int bots_print_header = 0; /* -z: emit CSV header before row-format output */
/* descriptive strings reported by bots_print_results() */
char bots_name[256]; /* benchmark name */
char bots_execname[256]; /* basename of argv[0] */
char bots_parameters[256]; /* run parameters (input file) */
char bots_model[256]; /* programming model description */
char bots_resources[256]; /* resources used (thread count) */
char bots_exec_date[256]; /* execution timestamp */
char bots_exec_message[256]; /* -e: free-form user message */
char bots_comp_date[256]; /* compilation date */
char bots_comp_message[256]; /* compilation message */
char bots_cc[256]; /* compiler */
char bots_cflags[256]; /* compiler flags */
char bots_ld[256]; /* linker */
char bots_ldflags[256]; /* linker flags */
char bots_cutoff[256]; /* description of the task cut-off policy */
/* timing and task accounting */
double bots_time_program = 0.0; /* measured parallel run time (seconds) */
double bots_time_sequential = 0.0; /* measured sequential run time (seconds) */
unsigned long long bots_number_of_tasks = 0; /* task count, when tracked */
/* benchmark-specific arguments */
char bots_arg_file[255]=""; /* -f: input file path */
int bots_cutoff_value = 2; /* -x: task-creation cut-off depth */
/* Print the command-line help text to stderr. Only the first two option
   lines need formatting; the remainder is a fixed tail emitted verbatim. */
void bots_print_usage()
{
    static const char *const fixed_tail[] = {
        "\n",
        " -e <str> : Include 'str' execution message.\n",
        " -v <level> : Set verbose level (default = 1).\n",
        " 0 - none.\n",
        " 1 - default.\n",
        " 2 - debug.\n",
        " -o <value> : Set output format mode (default = 1).\n",
        " 0 - no benchmark output.\n",
        " 1 - detailed list format.\n",
        " 2 - detailed row format.\n",
        " 3 - abridged list format.\n",
        " 4 - abridged row format.\n",
        " -z : Print row header (if output format is a row variant).\n",
        "\n",
        " -c : Check mode ON.\n",
        "\n",
        " -h : Print program's usage (this help).\n",
        "\n",
    };
    size_t i;
    fprintf(stderr, "\n");
    fprintf(stderr, "Usage: %s -[options]\n", bots_execname);
    fprintf(stderr, "\n");
    fprintf(stderr, "Where options are:\n");
    fprintf(stderr, " -f <file> : Health input file (mandatory)\n");
    fprintf(stderr, " -x <value> : OpenMP tasks cut-off value (default=%d)\n", 2);
    for (i = 0; i < sizeof fixed_tail / sizeof fixed_tail[0]; i++)
        fputs(fixed_tail[i], stderr);
}
/*
 * Parse the command-line options common to all BOTS benchmarks.
 *
 * Recognised flags: -c (check mode), -e <msg> (execution message),
 * -f <file> (input file), -h (help), -o <fmt> (output format),
 * -v <level> (verbosity), -x <n> (task cut-off), -z (row header).
 * Each consumed option letter is overwritten with '*' so benchmark-specific
 * parsers can skip already-handled arguments. Exits with status 100 on any
 * usage error or when help is requested.
 */
void
bots_get_params_common(int argc, char **argv)
{
   int i;
   /* bounded copies: argv strings may exceed the fixed-size global buffers,
      so the original unbounded strcpy calls risked buffer overflows */
   snprintf(bots_execname, sizeof(bots_execname), "%s", __xpg_basename(argv[0]));
   bots_get_date(bots_exec_date);
   strcpy(bots_exec_message,"");
   for (i=1; i<argc; i++)
   {
      if (argv[i][0] == '-')
      {
         switch (argv[i][1])
         {
            case 'c': /* enable result verification */
               argv[i][1] = '*';
               bots_check_flag = 1;
               break;
            case 'e': /* free-form execution message */
               argv[i][1] = '*';
               i++;
               if (argc == i) { bots_print_usage(); exit(100); }
               snprintf(bots_exec_message, sizeof(bots_exec_message), "%s", argv[i]);
               break;
            case 'f': /* input file path */
               argv[i][1] = '*';
               i++;
               if (argc == i) { bots_print_usage(); exit(100); }
               snprintf(bots_arg_file, sizeof(bots_arg_file), "%s", argv[i]);
               break;
            case 'h': /* help, then exit */
               argv[i][1] = '*';
               bots_print_usage();
               exit (100);
            case 'o': /* output format selector */
               argv[i][1] = '*';
               i++;
               if (argc == i) { bots_print_usage(); exit(100); }
               bots_output_format = atoi(argv[i]);
               break;
            case 'v': /* verbosity; debug level needs a debug build */
               argv[i][1] = '*';
               i++;
               if (argc == i) { bots_print_usage(); exit(100); }
               bots_verbose_mode = (bots_verbose_mode_t) atoi(argv[i]);
               if ( bots_verbose_mode > 1 ) {
                  /* typo fixed: "greather" -> "greater" */
                  fprintf(stderr, "Error: Configure the suite using '--debug' option in order to use a verbose level greater than 1.\n");
                  exit(100);
               }
               break;
            case 'x': /* OpenMP task-creation cut-off depth */
               argv[i][1] = '*';
               i++;
               if (argc == i) { bots_print_usage(); exit(100); }
               bots_cutoff_value = atoi(argv[i]);
               break;
            case 'z': /* print CSV header for row formats */
               argv[i][1] = '*';
               bots_print_header = 1;
               break;
            default:
               fprintf(stderr, "Error: Unrecognized parameter.\n");
               bots_print_usage();
               exit (100);
         }
      }
      else
      {
         fprintf(stderr, "Error: Unrecognized parameter.\n");
         bots_print_usage();
         exit (100);
      }
   }
}
/* Parse command-line arguments. This benchmark defines no options beyond
   the common BOTS set, so this is a thin wrapper. */
void bots_get_params(int argc, char **argv)
{
    bots_get_params_common(argc, argv);
}
/* Populate the description strings later reported by bots_print_results().
   All targets are independent 256-byte global buffers, so the fills are
   grouped by topic rather than declaration order. */
void bots_set_info ()
{
    /* benchmark identity and inputs */
    snprintf(bots_name, 256, "Health");
    snprintf(bots_parameters, 256, "%s", bots_arg_file);
    snprintf(bots_model, 256, "OpenMP (using tasks)");
    snprintf(bots_cutoff, 256, "pragma-if (%d)", bots_cutoff_value);
    /* runtime resources */
    snprintf(bots_resources, 256, "%d", omp_get_max_threads());
    /* build provenance */
    snprintf(bots_comp_date, 256, "7MAY2018");
    snprintf(bots_comp_message, 256, "bots");
    snprintf(bots_cc, 256, "gcc");
    snprintf(bots_cflags, 256, "-fopenmp");
    snprintf(bots_ld, 256, "gcc");
    snprintf(bots_ldflags, 256, "-lm");
}
/* Program entry point: parse arguments, read the simulation input, run the
 * timed parallel simulation under the energy monitor, optionally verify the
 * result, and print the BOTS report. Always returns 0; errors exit earlier. */
int
main(int argc, char* argv[])
{
long bots_t_start;
long bots_t_end;
bots_get_params(argc,argv);
struct Village *top; read_input_data(bots_arg_file);;
bots_set_info();
/* build the village hierarchy from the sim_* parameters just read */
allocate_village(&top, ((void *)0), ((void *)0), sim_level, 0);;
int cores = 255;
/* energymonitor__* is an external power-profiling API; presumably `cores`
   caps the sockets/cores tracked — confirm against its documentation */
energymonitor__setfilename("prof.csv");
energymonitor__init(cores,1);
energymonitor__trackpoweronly();
energymonitor__startprofiling();
/* timed region: only the parallel simulation is measured */
bots_t_start = bots_usecs();
sim_village_main_par(top);;
bots_t_end = bots_usecs();
energymonitor__stopprofiling();
bots_time_program = ((double)(bots_t_end-bots_t_start))/1000000;
;
if (bots_check_flag) {
bots_result = check_village(top);;
}
;
bots_print_results();
return (0);
}
/* Report a fatal error on stderr and terminate with status 100+error.
   When `message` is NULL a canned text is chosen from the error code;
   code 2 (unrecognized parameter) additionally prints the usage text. */
void
bots_error(int error, char *message)
{
    static const char *const canned[] = {
        "Unspecified error.",      /* 0 */
        "Not enough memory.",      /* 1 */
        "Unrecognized parameter.", /* 2 */
    };
    if (message == ((void *)0))
    {
        const char *text = (error >= 0 && error <= 2)
            ? canned[error]
            : "Invalid error code.";
        fprintf(stderr, "Error (%d): %s\n", error, text);
        if (error == 2)
            bots_print_usage();
    }
    else
        fprintf(stderr, "Error (%d): %s\n", error, message);
    exit(100+error);
}
/* Report a warning on stderr; unlike bots_error() execution continues.
   A NULL message selects a canned text from the warning code. */
void
bots_warning(int warning, char *message)
{
    if (message != ((void *)0))
    {
        fprintf(stderr, "Warning (%d): %s\n", warning, message);
        return;
    }
    fprintf(stderr, "Warning (%d): %s\n", warning,
            warning == 0 ? "Unspecified warning." : "Invalid warning code.");
}
/* Current wall-clock time in microseconds since the Unix epoch. */
long bots_usecs (void)
{
    struct timeval now;
    gettimeofday(&now, ((void *)0));
    return now.tv_sec * 1000000 + now.tv_usec;
}
/* Format the current UTC time as "YYYY/MM/DD;HH:MM" (16 chars + NUL) into
   `str`, which must hold at least 32 bytes (the bound passed to strftime). */
void
bots_get_date(char *str)
{
    time_t now = time(((void *)0));
    strftime(str, 32, "%Y/%m/%d;%H:%M", gmtime(&now));
}
/* Describe the host as "sysname-machine;ncpus" into `str` (256-byte bound). */
void bots_get_architecture(char *str)
{
    struct utsname uts;
    int ncpus = sysconf(_SC_NPROCESSORS_CONF);
    uname(&uts);
    snprintf(str, 256, "%s-%s;%d", uts.sysname, uts.machine, ncpus);
}
/* Report the 1/5/15-minute load averages as "a;b;c" into `str`
   (256-byte bound). The getloadavg() result is not checked, matching
   the original behaviour. */
void bots_get_load_average(char *str)
{
    double load[3];
    getloadavg (load, 3);
    snprintf(str, 256, "%.2f;%.2f;%.2f", load[0], load[1], load[2]);
}
/* Emit the benchmark report described by the bots_* globals on stdout.
 * bots_output_format selects the layout: 0 = silent, 1 = detailed list,
 * 2 = detailed semicolon-separated row, 3 = abridged list, 4 = abridged row.
 * When bots_print_header is set, row formats are preceded by a CSV header. */
void bots_print_results()
{
/* local snapshots of the globals, formatted once and reused by all layouts */
char str_name[256];
char str_parameters[256];
char str_model[256];
char str_resources[256];
char str_result[15];
char str_time_program[15];
char str_time_sequential[15];
char str_speed_up[15];
char str_number_of_tasks[15];
char str_number_of_tasks_per_second[15];
char str_exec_date[256];
char str_exec_message[256];
char str_architecture[256];
char str_load_avg[256];
char str_comp_date[256];
char str_comp_message[256];
char str_cc[256];
char str_cflags[256];
char str_ld[256];
char str_ldflags[256];
char str_cutoff[256];
sprintf(str_name, "%s", bots_name);
sprintf(str_parameters, "%s", bots_parameters);
sprintf(str_model, "%s", bots_model);
sprintf(str_cutoff, "%s", bots_cutoff);
sprintf(str_resources, "%s", bots_resources);
/* translate the verification code (see check_village / bots_result) */
switch(bots_result)
{
case 0:
sprintf(str_result, "n/a");
break;
case 1:
sprintf(str_result, "successful");
break;
case 2:
sprintf(str_result, "UNSUCCESSFUL");
break;
case 3:
sprintf(str_result, "Not requested");
break;
default:
sprintf(str_result, "error");
break;
}
sprintf(str_time_program, "%f", bots_time_program);
/* sequential time and speed-up only exist when a sequential run was timed */
if (bots_sequential_flag) sprintf(str_time_sequential, "%f", bots_time_sequential);
else sprintf(str_time_sequential, "n/a");
if (bots_sequential_flag)
sprintf(str_speed_up, "%3.2f", bots_time_sequential/bots_time_program);
else sprintf(str_speed_up, "n/a");
sprintf(str_number_of_tasks, "%3.2f", (float) bots_number_of_tasks);
sprintf(str_number_of_tasks_per_second, "%3.2f", (float) bots_number_of_tasks/bots_time_program);
sprintf(str_exec_date, "%s", bots_exec_date);
sprintf(str_exec_message, "%s", bots_exec_message);
bots_get_architecture(str_architecture);
bots_get_load_average(str_load_avg);
sprintf(str_comp_date, "%s", bots_comp_date);
sprintf(str_comp_message, "%s", bots_comp_message);
sprintf(str_cc, "%s", bots_cc);
sprintf(str_cflags, "%s", bots_cflags);
sprintf(str_ld, "%s", bots_ld);
sprintf(str_ldflags, "%s", bots_ldflags);
/* optional CSV header for the row-oriented formats (2 and 4) */
if(bots_print_header)
{
switch(bots_output_format)
{
case 0:
break;
case 1:
break;
case 2:
fprintf(stdout,
"Benchmark;Parameters;Model;Cutoff;Resources;Result;Time;Sequential;Speed-up;Nodes;Nodes/Sec;Exec Date;Exec Time;Exec Message;Architecture;Processors;Load Avg-1;Load Avg-5;Load Avg-15;Comp Date;Comp Time;Comp Message;CC;CFLAGS;LD;LDFLAGS\n");
break;
case 3:
break;
case 4:
fprintf(stdout,
"Benchmark;Parameters;Model;Cutoff;Resources;Result;Time;Sequential;Speed-up;Nodes;Nodes/Sec;\n");
break;
default:
break;
}
}
/* main report body */
switch(bots_output_format)
{
case 0:
/* silent */
break;
case 1:
/* detailed human-readable list */
fprintf(stdout, "\n");
fprintf(stdout, "Program             = %s\n", str_name);
fprintf(stdout, "Parameters          = %s\n", str_parameters);
fprintf(stdout, "Model               = %s\n", str_model);
fprintf(stdout, "Embedded cut-off    = %s\n", str_cutoff);
fprintf(stdout, "# of Threads        = %s\n", str_resources);
fprintf(stdout, "Verification        = %s\n", str_result);
fprintf(stdout, "Time Program        = %s seconds\n", str_time_program);
if (bots_sequential_flag) {
fprintf(stdout, "Time Sequential     = %s seconds\n", str_time_sequential);
fprintf(stdout, "Speed-up            = %s\n", str_speed_up);
}
if ( bots_number_of_tasks > 0 ) {
fprintf(stdout, "Nodes               = %s\n", str_number_of_tasks);
fprintf(stdout, "Nodes/Sec           = %s\n", str_number_of_tasks_per_second);
}
fprintf(stdout, "Execution Date      = %s\n", str_exec_date);
fprintf(stdout, "Execution Message   = %s\n", str_exec_message);
fprintf(stdout, "Architecture        = %s\n", str_architecture);
fprintf(stdout, "Load Avg [1:5:15]   = %s\n", str_load_avg);
fprintf(stdout, "Compilation Date    = %s\n", str_comp_date);
fprintf(stdout, "Compilation Message = %s\n", str_comp_message);
fprintf(stdout, "Compiler            = %s\n", str_cc);
fprintf(stdout, "Compiler Flags      = %s\n", str_cflags);
fprintf(stdout, "Linker              = %s\n", str_ld);
fprintf(stdout, "Linker Flags        = %s\n", str_ldflags);
fflush(stdout);
break;
case 2:
/* detailed semicolon-separated row (matches the header above) */
fprintf(stdout,"%s;%s;%s;%s;%s;%s;",
str_name,
str_parameters,
str_model,
str_cutoff,
str_resources,
str_result
);
fprintf(stdout,"%s;%s;%s;",
str_time_program,
str_time_sequential,
str_speed_up
);
fprintf(stdout,"%s;%s;",
str_number_of_tasks,
str_number_of_tasks_per_second
);
fprintf(stdout,"%s;%s;",
str_exec_date,
str_exec_message
);
fprintf(stdout,"%s;%s;",
str_architecture,
str_load_avg
);
fprintf(stdout,"%s;%s;",
str_comp_date,
str_comp_message
);
fprintf(stdout,"%s;%s;%s;%s;",
str_cc,
str_cflags,
str_ld,
str_ldflags
);
fprintf(stdout,"\n");
break;
case 3:
/* abridged list: stops after the task statistics */
fprintf(stdout, "\n");
fprintf(stdout, "Program             = %s\n", str_name);
fprintf(stdout, "Parameters          = %s\n", str_parameters);
fprintf(stdout, "Model               = %s\n", str_model);
fprintf(stdout, "Embedded cut-off    = %s\n", str_cutoff);
fprintf(stdout, "# of Threads        = %s\n", str_resources);
fprintf(stdout, "Verification        = %s\n", str_result);
fprintf(stdout, "Time Program        = %s seconds\n", str_time_program);
if (bots_sequential_flag) {
fprintf(stdout, "Time Sequential     = %s seconds\n", str_time_sequential);
fprintf(stdout, "Speed-up            = %s\n", str_speed_up);
}
if ( bots_number_of_tasks > 0 ) {
fprintf(stdout, "Nodes               = %s\n", str_number_of_tasks);
fprintf(stdout, "Nodes/Sec           = %s\n", str_number_of_tasks_per_second);
}
break;
case 4:
/* abridged row (matches the short header above) */
fprintf(stdout,"%s;%s;%s;%s;%s;%s;",
str_name,
str_parameters,
str_model,
str_cutoff,
str_resources,
str_result
);
fprintf(stdout,"%s;%s;%s;",
str_time_program,
str_time_sequential,
str_speed_up
);
fprintf(stdout,"%s;%s;",
str_number_of_tasks,
str_number_of_tasks_per_second
);
fprintf(stdout,"\n");
break;
default:
bots_error(0,"No valid output format\n");
break;
}
}
|
DRB085-threadprivate-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A file-scope variable used within a function called by a parallel region.
Use threadprivate to avoid data races.
*/
#include <stdio.h>
#include <assert.h>
/* sum0 is each thread's private partial sum (threadprivate);
   sum1 is the sequential reference sum computed in main(). */
int sum0=0, sum1=0;
#pragma omp threadprivate(sum0)

/* Add i to the calling thread's private copy of sum0. */
void foo (int i)
{
    sum0 += i;
}
/* Sum 0..len-1 in parallel. Each thread accumulates into its threadprivate
 * copy of sum0 (so the updates in foo() are race-free) and the per-thread
 * partials are combined into `sum` inside a critical section. copyin(sum0)
 * initialises every thread's copy from the master's value (0). The result
 * is checked against a sequential reference sum. */
int main()
{
int len=1000;
int i, sum=0;
#pragma omp parallel copyin(sum0)
{
#pragma omp for schedule(dynamic)
for (i=0;i<len;i++)
{
foo (i);
}
/* combine this thread's partial into the shared total, one thread at a time */
#pragma omp critical
{
sum= sum+sum0;
}
}
/* reference calculation */
for (i=0;i<len;i++)
{
sum1=sum1+i;
}
printf("sum=%d; sum1=%d\n",sum,sum1);
assert(sum==sum1);
return 0;
}
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/resize.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *orient_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Apply the transform that undoes the given EXIF orientation; unknown or
    already top-left orientations fall through to a plain clone.
  */
  if (orientation == TopRightOrientation)
    orient_image=FlopImage(image,exception);
  else if (orientation == BottomRightOrientation)
    orient_image=RotateImage(image,180.0,exception);
  else if (orientation == BottomLeftOrientation)
    orient_image=FlipImage(image,exception);
  else if (orientation == LeftTopOrientation)
    orient_image=TransposeImage(image,exception);
  else if (orientation == RightTopOrientation)
    orient_image=RotateImage(image,90.0,exception);
  else if (orientation == RightBottomOrientation)
    orient_image=TransverseImage(image,exception);
  else if (orientation == LeftBottomOrientation)
    orient_image=RotateImage(image,270.0,exception);
  else /* UndefinedOrientation, TopLeftOrientation, or anything else */
    orient_image=CloneImage(image,0,0,MagickTrue,exception);
  if (orient_image != (Image *) NULL)
    orient_image->orientation=TopLeftOrientation;
  return(orient_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
% Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Remove the rectangular region `chop_info` from `image` and collapse the
 * remainder: rows above the region are copied as-is (minus the chopped
 * columns), rows inside the region are dropped, and rows below are shifted
 * up. Returns a new image of size (columns-width) x (rows-height), or NULL
 * on failure. Throws GeometryDoesNotContainImage if the region lies wholly
 * outside the image. */
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"
CacheView
*chop_view,
*image_view;
Image
*chop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
extent;
ssize_t
y;
/*
Check chop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(chop_info != (RectangleInfo *) NULL);
if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
((chop_info->y+(ssize_t) chop_info->height) < 0) ||
(chop_info->x > (ssize_t) image->columns) ||
(chop_info->y > (ssize_t) image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
/* clamp the requested region so it lies fully within the image */
extent=(*chop_info);
if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
extent.width=(size_t) ((ssize_t) image->columns-extent.x);
if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
extent.height=(size_t) ((ssize_t) image->rows-extent.y);
if (extent.x < 0)
{
extent.width-=(size_t) (-extent.x);
extent.x=0;
}
if (extent.y < 0)
{
extent.height-=(size_t) (-extent.y);
extent.y=0;
}
chop_image=CloneImage(image,image->columns-extent.width,image->rows-
extent.height,MagickTrue,exception);
if (chop_image == (Image *) NULL)
return((Image *) NULL);
/*
Extract chop image: first the rows above the chopped region.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,chop_image,extent.y,1)
#endif
for (y=0; y < (ssize_t) extent.y; y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict chop_indexes,
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
/* copy only the columns outside the chopped horizontal span */
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
*q=(*p);
if (indexes != (IndexPacket *) NULL)
{
if (chop_indexes != (IndexPacket *) NULL)
*chop_indexes++=GetPixelIndex(indexes+x);
}
q++;
}
p++;
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
/*
Extract chop image: then the rows below the chopped region, shifted up.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict chop_indexes,
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
/* source row is below the region; destination row continues after extent.y */
p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
1,exception);
if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
*q=(*p);
if (indexes != (IndexPacket *) NULL)
{
if (chop_indexes != (IndexPacket *) NULL)
*chop_indexes++=GetPixelIndex(indexes+x);
}
q++;
}
p++;
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
chop_view=DestroyCacheView(chop_view);
image_view=DestroyCacheView(image_view);
chop_image->type=image->type;
if (status == MagickFalse)
chop_image=DestroyImage(chop_image);
return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
% single image.
%
% The format of the ConsolidateCMYKImages method is:
%
% Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Combine groups of four consecutive grayscale images (C, M, Y, K planes)
 * from the input list into single CMYK images. Each plane's intensity is
 * inverted (QuantumRange - intensity) before being stored in the
 * corresponding channel; the K plane is written to the index channel.
 * Returns the list of consolidated images (possibly empty on failure). */
MagickExport Image *ConsolidateCMYKImages(const Image *images,
ExceptionInfo *exception)
{
CacheView
*cmyk_view,
*image_view;
Image
*cmyk_image,
*cmyk_images;
register ssize_t
i;
ssize_t
y;
/*
Consolidate separate C, M, Y, and K planes into a single image.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
cmyk_images=NewImageList();
for (i=0; i < (ssize_t) GetImageListLength(images); i+=4)
{
/* the C plane seeds the geometry of the consolidated image */
cmyk_image=CloneImage(images,0,0,MagickTrue,exception);
if (cmyk_image == (Image *) NULL)
break;
if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse)
break;
(void) SetImageColorspace(cmyk_image,CMYKColorspace);
/* plane 1: cyan -> red channel */
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
/* plane 2: magenta -> green channel */
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
q->green=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
/* plane 3: yellow -> blue channel */
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
q->blue=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
/* plane 4: black -> index (K) channel */
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
indexes=GetCacheViewAuthenticIndexQueue(cmyk_view);
for (x=0; x < (ssize_t) images->columns; x++)
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange-
GetPixelIntensity(images,p)));
p++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
AppendImageToList(&cmyk_images,cmyk_image);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
}
return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The virtual canvas (image->page) defaults to the pixel region when its
    width or height is unset.
  */
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  /*
    An unset crop width/height means "to the canvas edge".
  */
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.opacity=(Quantum) TransparentOpacity;
      (void) SetImageBackgroundColor(crop_image);
      crop_image->page=bounding_box;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Translate the crop rectangle from canvas coordinates to pixel
    coordinates, clipping it against the canvas origin.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  /*
    Clip the rectangle to the pixel region, but never beyond the size the
    caller actually requested.
  */
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t) image->page.width) ||
      ((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t) image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image: copy each row (pixels plus any colormap/CMYK indexes) from
    the source region into the new image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): sibling loops list shared(progress,status); this one only
     lists status -- confirm progress is still shared by default here. */
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict crop_indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view);
    /*
      Rows are contiguous, so a straight memcpy transfers the scanline.
    */
    (void) memcpy(q,p,(size_t) crop_image->columns*sizeof(*p));
    if ((indexes != (IndexPacket *) NULL) &&
        (crop_indexes != (IndexPacket *) NULL))
      (void) memcpy(crop_indexes,indexes,(size_t) crop_image->columns*
        sizeof(*crop_indexes));
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CropImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image, into a possible list of tiles.
% This may include a single sub-region of the image. This basically applies
% all the normal geometry flags for Crop.
%
%  The format of the CropImageToTiles method is:
%
%      Image *CropImageToTiles(const Image *image,
%        const char *crop_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  /*
    Parse the geometry string; the returned flags select one of the crop
    modes below (NxM tiling, single region, or fixed-size tiling).
  */
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag).
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      /*
        Without the '!' (aspect) flag, |x|/|y| offsets shrink the tiled
        area; with it they enlarge it.
      */
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      /*
        Nominal tile size per axis, clamped to at least one pixel.
      */
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y; /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y; /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? geometry.y : 0)));
          }
        /*
          crop.height currently holds the tile's bottom edge; convert it to
          a height, then shift into virtual-canvas coordinates.
        */
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x; /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x; /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      /*
        Per-tile crop warnings are expected here; discard them.
      */
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      /*
        Walk the canvas in row-major WxH steps; stop on the first failed
        crop.
      */
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /*
    Geometry is at least as large as the image: return a plain copy.
  */
  return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row: copy the scanline at (geometry->x, geometry->y+y)
    into row y of the excerpt image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict excerpt_indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memcpy(q,p,(size_t) excerpt_image->columns*sizeof(*q));
    /*
      The source pixels were fetched through a *virtual* cache view, so the
      colormap/CMYK indexes must come from the virtual index queue (the
      authentic queue matches pixels fetched for writing; CropImage,
      FlipImage and FlopImage follow the same convention).
    */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view);
        if (excerpt_indexes != (IndexPacket *) NULL)
          (void) memcpy(excerpt_indexes,indexes,(size_t)
            excerpt_image->columns*sizeof(*excerpt_indexes));
      }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *canvas_image;

  MagickBooleanType
    status;

  /*
    Build the extended canvas: clone at the requested size, flood it with
    the background color, then composite the source image at the negated
    geometry offset.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageBackgroundColor(canvas_image);
  if (status != MagickFalse)
    status=CompositeImage(canvas_image,image->compose,image,-geometry->x,
      -geometry->y);
  if (status == MagickFalse)
    {
      /*
        Either step failed: surface the image exception and clean up.
      */
      InheritException(exception,&canvas_image->exception);
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  return(canvas_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flip_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: copy each source row y into destination row rows-1-y.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict flip_indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    /*
      Destination row is the vertical mirror of the source row.
    */
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memcpy(q,p,(size_t) image->columns*sizeof(*q));
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view);
        if (flip_indexes != (IndexPacket *) NULL)
          (void) memcpy(flip_indexes,indexes,(size_t) image->columns*
            sizeof(*flip_indexes));
      }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlipImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /*
    Mirror the page offset on the virtual canvas as well.
  */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flop_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: write each source scanline into the destination in
    reverse pixel order.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict flop_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Start q one past the row's end; the loop pre-decrements it, so pixels
      are stored columns-1 ... 0 while p advances forward.
    */
    q+=flop_image->columns;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view);
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      (*--q)=(*p++);
      /*
        Mirror the colormap/CMYK index positions the same way.
      */
      if ((indexes != (const IndexPacket *) NULL) &&
          (flop_indexes != (IndexPacket *) NULL))
        SetPixelIndex(flop_indexes+flop_image->columns-x-1,
          GetPixelIndex(indexes+x));
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /*
    Mirror the page offset on the virtual canvas as well.
  */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Copy a columns-by-rows pixel region (plus any colormap/CMYK indexes) from
  source at (sx,sy) to destination at (dx,dy).  Returns MagickTrue on
  success; MagickFalse if any scanline could not be read, written, or
  synced.
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source,
  const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Nothing to copy for a zero-width region (a zero-height region falls out
    of the loop below naturally).
  */
  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict destination_indexes;

    register PixelPacket
      *magick_restrict q;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source_view);
    (void) memcpy(q,p,(size_t) columns*sizeof(*p));
    if (indexes != (IndexPacket *) NULL)
      {
        destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
        if (destination_indexes != (IndexPacket *) NULL)
          (void) memcpy(destination_indexes,indexes,(size_t)
            columns*sizeof(*indexes));
      }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  roll_image=CloneImage(image,0,0,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize the roll offsets into [0,columns) and [0,rows).
  */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: move the four wrapped quadrants into place.
    First: bottom-right (offset.x x offset.y) quadrant to the top-left.
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  /*
    Bottom-left strip to the top-right.
  */
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  /*
    Top-right strip to the bottom-left.
  */
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  /*
    Remaining top-left quadrant to the bottom-right.
  */
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    crop_info;

  /*
    Shave the image edges: crop the interior region, then pull the virtual
    canvas (page) in by the same amounts.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  SetGeometry(image,&crop_info);
  crop_info.x=(ssize_t) shave_info->width+image->page.x;
  crop_info.y=(ssize_t) shave_info->height+image->page.y;
  crop_info.width-=2*shave_info->width;
  crop_info.height-=2*shave_info->height;
  shave_image=CropImage(image,&crop_info,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  return(shave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"
CacheView
*image_view,
*splice_view;
Image
*splice_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
splice_geometry;
ssize_t
columns,
y;
/*
Allocate splice image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
splice_geometry=(*geometry);
splice_image=CloneImage(image,image->columns+splice_geometry.width,
image->rows+splice_geometry.height,MagickTrue,exception);
if (splice_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse)
{
InheritException(exception,&splice_image->exception);
splice_image=DestroyImage(splice_image);
return((Image *) NULL);
}
(void) SetImageBackgroundColor(splice_image);
/*
Respect image geometry.
*/
switch (image->gravity)
{
default:
case UndefinedGravity:
case NorthWestGravity:
break;
case NorthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
break;
}
case NorthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
break;
}
case WestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.width/2;
break;
}
case StaticGravity:
case CenterGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case EastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case SouthWestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
}
/*
Splice image.
*/
status=MagickTrue;
progress=0;
columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
for (y=0; y < (ssize_t) splice_geometry.y; y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes,
*magick_restrict splice_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
for (x=0; x < columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q++;
for ( ; x < (ssize_t) splice_image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SpliceImageTag,progress,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,splice_image,splice_image->rows,1)
#endif
for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
y < (ssize_t) splice_image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes,
*magick_restrict splice_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
if ((y < 0) || (y >= (ssize_t)splice_image->rows))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
splice_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
for (x=0; x < columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q++;
for ( ; x < (ssize_t) splice_image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SpliceImageTag,progress,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
splice_view=DestroyCacheView(splice_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
splice_image=DestroyImage(splice_image);
return(splice_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry)
%
% A description of each parameter follows:
%
% o image: the image; the transformed image is returned in this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
/*
DANGER: This function destroys what it assumes to be a single image list.
If the input image is part of a larger list, all other images in that list
will be simply 'lost', not destroyed.
Also if the crop generates a list of images only the first image is resized.
And finally if the crop succeeds and the resize failed, you will get a
cropped image, as well as a 'false' or 'failed' report.
This function should probably be deprecated in favor of direct calls
to CropImageToTiles() or ResizeImage(), as appropriate.
*/
/*
  TransformImage(): crop and/or resize *image in place.  The image pointed
  to by *image may be destroyed and replaced; on return *image always refers
  to a valid image.  Returns MagickTrue on success, MagickFalse only when
  the resize step fails (a failed crop falls back to a clone of the input).
  NOTE(review): as the comment above warns, this assumes a single-image
  list; when CropImageToTiles yields several tiles only the first is
  resized, and the remaining tiles stay in the list.
*/
MagickExport MagickBooleanType TransformImage(Image **image,
const char *crop_geometry,const char *image_geometry)
{
Image
*resize_image,
*transform_image;
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image **) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
transform_image=(*image);
if (crop_geometry != (const char *) NULL)
{
Image
*crop_image;
/*
Crop image to a user specified size.
*/
crop_image=CropImageToTiles(*image,crop_geometry,&(*image)->exception);
if (crop_image == (Image *) NULL)
transform_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception);
else
{
/* ownership transfer: the original image is destroyed and replaced by
   the first crop tile */
transform_image=DestroyImage(transform_image);
transform_image=GetFirstImageInList(crop_image);
}
*image=transform_image;
}
if (image_geometry == (const char *) NULL)
return(MagickTrue);
/*
Scale image to a user specified size.
*/
flags=ParseRegionGeometry(transform_image,image_geometry,&geometry,
&(*image)->exception);
(void) flags;
/* skip the resize when the image already has the requested dimensions */
if ((transform_image->columns == geometry.width) &&
(transform_image->rows == geometry.height))
return(MagickTrue);
resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
transform_image->filter,transform_image->blur,&(*image)->exception);
if (resize_image == (Image *) NULL)
return(MagickFalse);
transform_image=DestroyImage(transform_image);
transform_image=resize_image;
*image=transform_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImages() calls TransformImage() on each image of a sequence.
%
% The format of the TransformImages method is:
%
% MagickBooleanType TransformImages(Image **image,
% const char *crop_geometry,const char *image_geometry)
%
% A description of each parameter follows:
%
% o image: the image; the transformed image is returned in this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
/*
  TransformImages(): apply TransformImage() to every image of a sequence,
  rebuilding *images from the transformed frames.  Returns MagickTrue only
  if every per-frame transform succeeded.
*/
MagickExport MagickBooleanType TransformImages(Image **images,
  const char *crop_geometry,const char *image_geometry)
{
  Image
    **frames,
    *result;

  MagickStatusType
    status;

  ssize_t
    i;

  assert(images != (Image **) NULL);
  assert((*images)->signature == MagickCoreSignature);
  if ((*images)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      (*images)->filename);
  /*
    Flatten the list into an array so each frame can be transformed
    independently of its list links.
  */
  frames=ImageListToArray(*images,&(*images)->exception);
  if (frames == (Image **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  result=NewImageList();
  for (i=0; frames[i] != (Image *) NULL; i++)
  {
    Image
      *frame;

    frame=frames[i];
    status&=TransformImage(&frame,crop_geometry,image_geometry);
    AppendImageToList(&result,frame);
  }
  *images=result;
  frames=(Image **) RelinquishMagickMemory(frames);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"
CacheView
*image_view,
*transpose_view;
Image
*transpose_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* result dimensions are swapped: rows become columns and vice versa */
transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transpose_image == (Image *) NULL)
return((Image *) NULL);
/*
Transpose image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transpose_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict transpose_indexes,
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
/* source row (rows-y-1) becomes destination column (rows-y-1): a row of
   image->columns pixels is written as a column of transpose_image->rows
   pixels (the two lengths are equal by construction) */
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
0,1,transpose_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
(void) memcpy(q,p,(size_t) image->columns*sizeof(*q));
/* copy the colormap/alpha index channel when present */
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view);
if (transpose_indexes != (IndexPacket *) NULL)
(void) memcpy(transpose_indexes,indexes,(size_t)
image->columns*sizeof(*transpose_indexes));
}
if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransposeImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transpose_view=DestroyCacheView(transpose_view);
image_view=DestroyCacheView(image_view);
transpose_image->type=image->type;
/* the virtual canvas (page) geometry must be transposed as well */
page=transpose_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
transpose_image->page=page;
if (status == MagickFalse)
transpose_image=DestroyImage(transpose_image);
return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"
CacheView
*image_view,
*transverse_view;
Image
*transverse_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* result dimensions are swapped: rows become columns and vice versa */
transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transverse_image == (Image *) NULL)
return((Image *) NULL);
/*
Transverse image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transverse_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict transverse_indexes,
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
/* source row y becomes destination column (rows-y-1) */
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-
1),0,1,transverse_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
/* copy the row reversed: write destination pixels back-to-front */
q+=image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
*--q=(*p++);
/* mirror the colormap/alpha index channel when present */
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view);
if (transverse_indexes != (IndexPacket *) NULL)
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(transverse_indexes+image->columns-x-1,
GetPixelIndex(indexes+x));
}
sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transverse_view=DestroyCacheView(transverse_view);
image_view=DestroyCacheView(image_view);
transverse_image->type=image->type;
/* transpose the virtual canvas and reflect its offsets */
page=transverse_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
transverse_image->page=page;
if (status == MagickFalse)
transverse_image=DestroyImage(transverse_image);
return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  TrimImage(): crop the image to the bounding box of its non-border pixels.
  When the bounding box is degenerate (zero width or height) a 1x1 fully
  transparent image is returned instead.  Returns NULL on failure.
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    bounds;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      /* translate the box into canvas (page) coordinates and crop */
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      return(CropImage(image,&bounds,exception));
    }
  /*
    Degenerate bounding box: return a 1x1 transparent placeholder.
  */
  {
    Image
      *empty_image;

    empty_image=CloneImage(image,1,1,MagickTrue,exception);
    if (empty_image == (Image *) NULL)
      return((Image *) NULL);
    empty_image->background_color.opacity=(Quantum) TransparentOpacity;
    (void) SetImageBackgroundColor(empty_image);
    empty_image->page=image->page;
    empty_image->page.x=(-1);
    empty_image->page.y=(-1);
    return(empty_image);
  }
}
|
util.h | /*
Copyright (c) 2013, Taiga Nomi
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once

// Standard library headers (alphabetical).
#include <algorithm>     // std::reverse, std::max_element, std::min/std::max
#include <cassert>
#include <cmath>         // std::isfinite
#include <cstdarg>
#include <cstdio>
#include <functional>
#include <limits>
#include <random>
#include <sstream>
#include <string>
#include <type_traits>
#include <vector>

// Project headers.
#include "aligned_allocator.h"
#include "nn_error.h"
#include "tiny_cnn/config.h"

#ifdef CNN_USE_TBB
#ifndef NOMINMAX
#define NOMINMAX // tbb includes windows.h in tbb/machine/windows_api.h
#endif
#include <tbb/tbb.h>
#include <tbb/task_group.h>
#endif

#ifndef CNN_USE_OMP
#include <thread>
#include <future>
#endif
#define CNN_UNREFERENCED_PARAMETER(x) (void)(x)
namespace tiny_cnn {
///< output label(class-index) for classification
///< must be equal to cnn_size_t, because size of last layer is equal to num. of classes
typedef cnn_size_t label_t;
typedef cnn_size_t layer_size_t; // for backward compatibility
// Dense vector of float_t with 64-byte alignment (see aligned_allocator).
typedef std::vector<float_t, aligned_allocator<float_t, 64>> vec_t;
// Collection of vec_t (e.g. one vec_t per sample/channel).
typedef std::vector<vec_t> tensor_t;
// Network execution phase (training vs. inference); consumed by layers
// whose behavior differs between the two.
enum class net_phase {
train,
test
};
// Process-wide singleton wrapping a single mt19937 engine so that all
// random draws in the library share one seedable source.
// NOTE(review): access to the engine is not synchronized; concurrent use
// from multiple threads would race — confirm single-threaded usage.
class random_generator {
public:
static random_generator& get_instance() {
static random_generator instance;
return instance;
}
// Access the underlying Mersenne-Twister engine.
std::mt19937& operator()() {
return gen_;
}
// Reseed the shared engine (e.g. for reproducible experiments).
void set_seed(unsigned int seed) {
gen_.seed(seed);
}
private:
// avoid gen_(0) for MSVC known issue
// https://connect.microsoft.com/VisualStudio/feedback/details/776456
random_generator() : gen_(1) {}
std::mt19937 gen_;
};
// Uniform random integer in the closed range [min, max], drawn from the
// shared random_generator engine.
template<typename T> inline
typename std::enable_if<std::is_integral<T>::value, T>::type
uniform_rand(T min, T max) {
std::uniform_int_distribution<T> dst(min, max);
return dst(random_generator::get_instance()());
}
// Uniform random floating-point value in [min, max).
template<typename T> inline
typename std::enable_if<std::is_floating_point<T>::value, T>::type
uniform_rand(T min, T max) {
std::uniform_real_distribution<T> dst(min, max);
return dst(random_generator::get_instance()());
}
// Normally distributed random value with the given mean and sigma.
template<typename T> inline
typename std::enable_if<std::is_floating_point<T>::value, T>::type
gaussian_rand(T mean, T sigma) {
std::normal_distribution<T> dst(mean, sigma);
return dst(random_generator::get_instance()());
}
// Reseed the shared engine used by all the *_rand helpers above.
inline void set_random_seed(unsigned int seed) {
random_generator::get_instance().set_seed(seed);
}
// Uniform random valid index into container t (t must be non-empty).
template<typename Container>
inline int uniform_idx(const Container& t) {
return uniform_rand(0, int(t.size() - 1));
}
// Bernoulli trial: true with probability (approximately) p.
inline bool bernoulli(float_t p) {
return uniform_rand(float_t(0), float_t(1)) <= p;
}
// Fill [begin, end) with uniform random values in [min, max).
template<typename Iter>
void uniform_rand(Iter begin, Iter end, float_t min, float_t max) {
for (Iter it = begin; it != end; ++it)
*it = uniform_rand(min, max);
}
// Fill [begin, end) with N(mean, sigma^2) random values.
template<typename Iter>
void gaussian_rand(Iter begin, Iter end, float_t mean, float_t sigma) {
for (Iter it = begin; it != end; ++it)
*it = gaussian_rand(mean, sigma);
}
// Reverse the byte order of *p in place; returns p for call chaining.
template<typename T>
T* reverse_endian(T* p) {
    char* first = reinterpret_cast<char*>(p);
    char* last = first + sizeof(T);
    std::reverse(first, last);
    return p;
}
// True when the host stores the least-significant byte first.
inline bool is_little_endian() {
    const int probe = 1;
    return *reinterpret_cast<const char*>(&probe) != 0;
}
// Index of the first maximum element in a container (argmax).
template<typename T>
size_t max_index(const T& vec) {
    const auto first = std::begin(vec);
    const auto largest = std::max_element(first, std::end(vec));
    return static_cast<size_t>(largest - first);
}
// Linearly map x from [src_min, src_max] into [dst_min, dst_max],
// clamping the result to the destination range.
template<typename T, typename U>
U rescale(T x, T src_min, T src_max, U dst_min, U dst_max) {
    U mapped = static_cast<U>(((x - src_min) * (dst_max - dst_min)) / (src_max - src_min) + dst_min);
    if (mapped < dst_min) return dst_min;
    if (dst_max < mapped) return dst_max;
    return mapped;
}
// Intentionally empty function: a callable no-op placeholder.
inline void nop()
{
// do nothing
}
// Parallel-for infrastructure.  Three back-ends selected at compile time:
// TBB (CNN_USE_TBB), OpenMP (CNN_USE_OMP), single-thread
// (CNN_SINGLE_THREAD), or std::async (default, defined below the #else).
#ifdef CNN_USE_TBB
static tbb::task_scheduler_init tbbScheduler(tbb::task_scheduler_init::automatic);//tbb::task_scheduler_init::deferred);
typedef tbb::blocked_range<int> blocked_range;
// TBB back-end: delegate chunking to tbb::parallel_for.
template<typename Func>
void parallel_for(int begin, int end, const Func& f, int grainsize) {
tbb::parallel_for(blocked_range(begin, end, end - begin > grainsize ? grainsize : 1), f);
}
// Serial fallback: invoke f once over the whole range.
template<typename Func>
void xparallel_for(int begin, int end, const Func& f) {
f(blocked_range(begin, end, 100));
}
#else
// Minimal stand-in for tbb::blocked_range: a half-open int range [begin, end).
struct blocked_range {
typedef int const_iterator;
blocked_range(int begin, int end) : begin_(begin), end_(end) {}
blocked_range(size_t begin, size_t end) : begin_(static_cast<int>(begin)), end_(static_cast<int>(end)) {}
const_iterator begin() const { return begin_; }
const_iterator end() const { return end_; }
private:
int begin_;
int end_;
};
// Serial execution: invoke f once over the whole range.
template<typename Func>
void xparallel_for(size_t begin, size_t end, const Func& f) {
blocked_range r(begin, end);
f(r);
}
#if defined(CNN_USE_OMP)
// OpenMP back-end: one single-element range per iteration.
template<typename Func>
void parallel_for(int begin, int end, const Func& f, int /*grainsize*/) {
#pragma omp parallel for
for (int i=begin; i<end; ++i)
f(blocked_range(i,i+1));
}
#elif defined(CNN_SINGLE_THREAD)
// Single-thread build: parallel_for degrades to the serial version.
template<typename Func>
void parallel_for(int begin, int end, const Func& f, int /*grainsize*/) {
xparallel_for(static_cast<size_t>(begin), static_cast<size_t>(end), f);
}
#else
// std::async back-end: split [start, end) into one contiguous block per
// hardware thread and run each block on its own async task.
// The grainsize parameter is accepted for interface parity but unused.
template<typename Func>
void parallel_for(int start, int end, const Func &f, int /*grainsize*/) {
    // BUG FIX: hardware_concurrency() may return 0 ("unknown"), which
    // previously caused a division by zero below; fall back to one thread.
    int nthreads = static_cast<int>(std::thread::hardware_concurrency());
    if (nthreads <= 0)
        nthreads = 1;
    if (end <= start)
        return;  // empty range: nothing to do
    // Block size rounded up so nthreads blocks cover the whole range.
    int blockSize = (end - start) / nthreads;
    if (blockSize * nthreads < end - start)
        blockSize++;
    std::vector<std::future<void>> futures;
    int blockStart = start;
    int blockEnd = blockStart + blockSize;
    if (blockEnd > end) blockEnd = end;
    for (int i = 0; i < nthreads; i++) {
        // Capture the current block bounds by value; f by reference.
        futures.push_back(std::async(std::launch::async, [blockStart, blockEnd, &f] {
            f(blocked_range(blockStart, blockEnd));
        }));
        blockStart += blockSize;
        blockEnd = blockStart + blockSize;
        if (blockStart >= end) break;
        if (blockEnd > end) blockEnd = end;
    }
    // Block until every chunk has completed.
    for (auto &future : futures)
        future.wait();
}
#endif
#endif // CNN_USE_TBB
// True when `value` survives a round-trip conversion through T unchanged,
// i.e. the value is exactly representable as a T.
template<typename T, typename U>
bool value_representation(U const &value) {
    const T narrowed = static_cast<T>(value);
    return static_cast<U>(narrowed) == value;
}
// for_ dispatch for unsigned T: only parallelize when `end` fits in int
// (parallel_for takes int bounds).
template<typename T, typename Func>
inline
void for_(std::true_type, bool parallelize, int begin, T end, Func f, int grainsize = 100){
parallelize = parallelize && value_representation<int>(end);
parallelize ? parallel_for(begin, static_cast<int>(end), f, grainsize) :
xparallel_for(begin, static_cast<int>(end), f);
}
// for_ dispatch for signed T: parallelize when requested.
template<typename T, typename Func>
inline
void for_(std::false_type, bool parallelize, int begin, T end, Func f, int grainsize = 100){
parallelize ? parallel_for(begin, static_cast<int>(end), f, grainsize) : xparallel_for(begin, end, f);
}
// Run f over [begin, end), optionally in parallel; f receives a
// blocked_range chunk.  Dispatches on the signedness of T above.
template<typename T, typename Func>
inline
void for_(bool parallelize, int begin, T end, Func f, int grainsize = 100) {
static_assert(std::is_integral<T>::value, "end must be integral type");
for_(typename std::is_unsigned<T>::type(), parallelize, begin, end, f, grainsize);
}
// Element-wise variant: call f(i) for each i in [0, size).
// NOTE(review): with CNN_USE_OMP the inner pragma adds a second level of
// "omp parallel for" inside parallel_for's chunks — confirm the nesting
// is intended.
template <typename T, typename Func>
void for_i(bool parallelize, T size, Func f, int grainsize = 100)
{
for_(parallelize, 0, size, [&](const blocked_range& r) {
#ifdef CNN_USE_OMP
#pragma omp parallel for
#endif
for (int i = r.begin(); i < r.end(); i++)
f(i);
}, grainsize);
}
// Convenience overload: always parallelize.
template <typename T, typename Func>
void for_i(T size, Func f, int grainsize = 100) {
for_i(true, size, f, grainsize);
}
// Square of value (value * value).
template <typename T>
inline T sqr(T value) {
    return value * value;
}
// True when x is a finite number (neither NaN nor +/-infinity).
// BUG FIX: the previous implementation (`x == x`) only rejected NaN and
// incorrectly reported infinities as finite, so has_infinite() below
// missed +/-inf values.
inline bool isfinite(float_t x) {
    return std::isfinite(x);
}
// True if any element of c is non-finite (per isfinite above).
template <typename Container> inline bool has_infinite(const Container& c) {
for (auto v : c)
if (!isfinite(v)) return true;
return false;
}
// Size of the largest element in a container of containers.
// Precondition: c must be non-empty (max_element on an empty range
// returns end(), which would be dereferenced here).
template <typename Container>
size_t max_size(const Container& c) {
    using value_t = typename Container::value_type;
    auto by_size = [](const value_t& lhs, const value_t& rhs) {
        return lhs.size() < rhs.size();
    };
    return std::max_element(c.begin(), c.end(), by_size)->size();
}
// printf-style formatting into a std::string.
// Output longer than the internal 2048-byte buffer (including the NUL
// terminator) is truncated by vsnprintf.
// BUG FIX: the buffer was `static`, making concurrent or reentrant calls
// race on shared state; a per-call stack buffer is thread-safe.
inline std::string format_str(const char *fmt, ...) {
    char buf[2048];
#ifdef _MSC_VER
#pragma warning(disable:4996)
#endif
    va_list args;
    va_start(args, fmt);
    vsnprintf(buf, sizeof(buf), fmt, args);
    va_end(args);
#ifdef _MSC_VER
#pragma warning(default:4996)
#endif
    return std::string(buf);
}
// 3-D index helper: maps (x, y, channel) coordinates onto a flat buffer
// laid out channel-major, then row-major within a channel.
template <typename T>
struct index3d {
index3d(T width, T height, T depth) {
reshape(width, height, depth);
}
index3d() : width_(0), height_(0), depth_(0) {}
// Set the dimensions; throws nn_error when the total element count
// exceeds what T can represent.
// NOTE(review): the guard compares in long long; if T is itself 64-bit
// the product could overflow before the check — confirm T is 32-bit.
void reshape(T width, T height, T depth) {
width_ = width;
height_ = height;
depth_ = depth;
if ((long long) width * height * depth > std::numeric_limits<T>::max())
throw nn_error(
format_str("error while constructing layer: layer size too large for tiny-cnn\nWidthxHeightxChannels=%dx%dx%d >= max size of [%s](=%d)",
width, height, depth, typeid(T).name(), std::numeric_limits<T>::max()));
}
// Flat offset of (x, y, channel): channel plane, then row, then column.
T get_index(T x, T y, T channel) const {
assert(x >= 0 && x < width_);
assert(y >= 0 && y < height_);
assert(channel >= 0 && channel < depth_);
return (height_ * channel + y) * width_ + x;
}
// Elements per channel plane.
T area() const {
return width_ * height_;
}
// Total number of elements.
T size() const {
return width_ * height_ * depth_;
}
T width_;
T height_;
T depth_;
};
typedef index3d<cnn_size_t> shape3d;
// Two index3d values are equal when all three dimensions match.
template <typename T>
bool operator == (const index3d<T>& lhs, const index3d<T>& rhs) {
return (lhs.width_ == rhs.width_) && (lhs.height_ == rhs.height_) && (lhs.depth_ == rhs.depth_);
}
template <typename T>
bool operator != (const index3d<T>& lhs, const index3d<T>& rhs) {
return !(lhs == rhs);
}
// Stream an index3d as "WxHxD".
template <typename Stream, typename T>
Stream& operator << (Stream& s, const index3d<T>& d) {
s << d.width_ << "x" << d.height_ << "x" << d.depth_;
return s;
}
// Stream a list of index3d as "[[WxHxD],[WxHxD],...]".
template <typename Stream, typename T>
Stream& operator << (Stream& s, const std::vector<index3d<T>>& d) {
s << "[";
for (cnn_size_t i = 0; i < d.size(); i++) {
if (i) s << ",";
s << "[" << d[i] << "]";
}
s << "]";
return s;
}
// Portable replacement for std::to_string (unavailable on the Android
// NDK): stream-insert the value and return the accumulated text.
template <typename T>
std::string to_string(T value) {
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
// boilerplate to resolve dependent name
#define CNN_USE_LAYER_MEMBERS using layer::parallelize_; \
using feedforward_layer<Activation>::h_
#define CNN_LOG_VECTOR(vec, name)
/*
void CNN_LOG_VECTOR(const vec_t& vec, const std::string& name) {
std::cout << name << ",";
if (vec.empty()) {
std::cout << "(empty)" << std::endl;
}
else {
for (size_t i = 0; i < vec.size(); i++) {
std::cout << vec[i] << ",";
}
}
std::cout << std::endl;
}
*/
// Sum s(vec[i]) over the indices i for which p(i) holds.
// Note: the predicate receives the *index*, not the element.
// NOTE(review): accumulates in size_t but returns cnn_size_t — possible
// narrowing if cnn_size_t is smaller than size_t; confirm intended.
template <typename T, typename Pred, typename Sum>
cnn_size_t sumif(const std::vector<T>& vec, Pred p, Sum s) {
size_t sum = 0;
for (size_t i = 0; i < vec.size(); i++) {
if (p(i)) sum += s(vec[i]);
}
return sum;
}
// Copy of vec keeping only the elements whose *index* satisfies p.
// Note: the predicate is called with the element index, not the element.
template <typename T, typename Pred>
std::vector<T> filter(const std::vector<T>& vec, Pred p) {
    std::vector<T> kept;
    const size_t count = vec.size();
    for (size_t idx = 0; idx < count; ++idx)
        if (p(idx))
            kept.push_back(vec[idx]);
    return kept;
}
// Apply p to every element of vec, collecting the results into a new
// vector of Result.
template <typename Result, typename T, typename Pred>
std::vector<Result> map_(const std::vector<T>& vec, Pred p) {
    std::vector<Result> out;
    out.reserve(vec.size());
    for (const auto& element : vec)
        out.push_back(p(element));
    return out;
}
// Classification of the data buffers a layer exchanges; the high bits
// group related kinds (0x0001xxx data, 0x0002xxx trainable parameters).
enum class vector_type : int32_t {
// 0x0001XXX : in/out data
data = 0x0001000, // input/output data, fed by other layer or input channel
// 0x0002XXX : trainable parameters, updated for each back propagation
weight = 0x0002000,
bias = 0x0002001,
label = 0x0004000,
aux = 0x0010000 // layer-specific storage
};
// Human-readable name of a vector_type (for logging/diagnostics).
inline std::string to_string(vector_type vtype) {
switch (vtype)
{
case tiny_cnn::vector_type::data:
return "data";
case tiny_cnn::vector_type::weight:
return "weight";
case tiny_cnn::vector_type::bias:
return "bias";
case tiny_cnn::vector_type::label:
return "label";
case tiny_cnn::vector_type::aux:
return "aux";
default:
return "unknown";
}
}
inline std::ostream& operator << (std::ostream& os, vector_type vtype) {
os << to_string(vtype);
return os;
}
// Bitwise AND on the underlying int32_t values (used for group tests).
inline vector_type operator & (vector_type lhs, vector_type rhs) {
return (vector_type)(static_cast<int32_t>(lhs) & static_cast<int32_t>(rhs));
}
// True for weight and bias (both carry the 0x0002000 group bit).
inline bool is_trainable_weight(vector_type vtype) {
return (vtype & vector_type::weight) == vector_type::weight;
}
// Conventional input-channel ordering for a layer: data, weight[, bias].
inline std::vector<vector_type> std_input_order(bool has_bias) {
if (has_bias) {
return{ vector_type::data, vector_type::weight, vector_type::bias };
}
else {
return{ vector_type::data, vector_type::weight };
}
}
// Conventional output-channel ordering: data[, aux when activated].
inline std::vector<vector_type> std_output_order(bool has_activation) {
if (has_activation) {
return{ vector_type::data, vector_type::aux };
}
else {
return{ vector_type::data };
}
}
} // namespace tiny_cnn
#if defined(_MSC_VER) && (_MSC_VER <= 1800)
#define CNN_DEFAULT_MOVE_CONSTRUCTOR_UNAVAILABLE
#define CNN_DEFAULT_ASSIGNMENT_OPERATOR_UNAVAILABLE
#endif
|
imagelib.c | //
// Created by josetobias on 5/11/19.
//
#include "imagelib.h"
/**
 * Read a PNG file and return its pixels normalized to 8-bit RGBA rows.
 *
 * @param file_path    path of the PNG file to open
 * @param width        out: image width in pixels
 * @param height       out: image height in pixels
 * @param color_type   out: original libpng color type
 * @param bit_depth    out: original bit depth
 * @param row_pointers out: malloc'd array of *height row buffers, each
 *                     png_get_rowbytes() bytes; caller owns and must free
 *                     every row and the array itself
 */
void read_png_file(char *file_path, int *width, int *height, png_byte *color_type, png_byte *bit_depth,
                   png_byte ***row_pointers) {
    FILE *fp = fopen(file_path, "rb");
    if (!fp)  /* BUG FIX: previously unchecked; NULL crashed inside libpng */
        abort();
    png_structp png = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    if (!png)
        abort();
    png_infop info = png_create_info_struct(png);
    if (!info)
        abort();
    if (setjmp(png_jmpbuf(png)))  /* libpng longjmps here on read errors */
        abort();
    png_init_io(png, fp);
    png_read_info(png, info);
    *width = png_get_image_width(png, info);
    *height = png_get_image_height(png, info);
    *color_type = png_get_color_type(png, info);
    *bit_depth = png_get_bit_depth(png, info);
    /* Read any color_type into 8bit depth, RGBA format.
       See http://www.libpng.org/pub/png/libpng-manual.txt */
    if (*bit_depth == 16)
        png_set_strip_16(png);
    if (*color_type == PNG_COLOR_TYPE_PALETTE)
        png_set_palette_to_rgb(png);
    if (*color_type == PNG_COLOR_TYPE_GRAY && *bit_depth < 8)
        png_set_expand_gray_1_2_4_to_8(png);
    if (png_get_valid(png, info, PNG_INFO_tRNS))
        png_set_tRNS_to_alpha(png);
    if (*color_type == PNG_COLOR_TYPE_RGB || *color_type == PNG_COLOR_TYPE_GRAY ||
        *color_type == PNG_COLOR_TYPE_PALETTE)
        png_set_filler(png, 0xFF, PNG_FILLER_AFTER);
    if (*color_type == PNG_COLOR_TYPE_GRAY || *color_type == PNG_COLOR_TYPE_GRAY_ALPHA)
        png_set_gray_to_rgb(png);
    png_read_update_info(png, info);
    /* BUG FIX: the row-pointer array holds one pointer per ROW, so it must
       be sized by *height, not *width (the old code overflowed the heap
       whenever height > width). */
    *row_pointers = (png_byte **) malloc(sizeof(png_byte *) * *height);
    for (int y = 0; y < *height; y++)
        (*row_pointers)[y] = (png_byte *) malloc(png_get_rowbytes(png, info));
    png_read_image(png, *row_pointers);
    png_destroy_read_struct(&png, &info, NULL);
    fclose(fp);
}
/**
 * Allocate the dim_y x dim_x pixel grid of *src (one malloc per row).
 * dim_y and dim_x must already be set on *src.
 * NOTE(review): malloc results are unchecked — an OOM crashes on first use.
 *
 * @param src figure whose pixels array is allocated
 */
void malloc_components(figure **src) {
(*src)->pixels = (pixel **) malloc(sizeof(pixel *) * (*src)->dim_y);
for (int y = 0; y < (*src)->dim_y; y++)
(*src)->pixels[y] = (pixel *) malloc(sizeof(pixel) * (*src)->dim_x);
}
/**
 * Build a binary image: allocate the pixel grid of *binary_comps, convert
 * the raw image bytes to Rec. 601 gray levels, then threshold at 50% of
 * full scale.
 *
 * @param row_pointers raw image bytes
 * @param binary_comps figure whose pixel grid is allocated and filled
 */
void binarizer(png_byte *row_pointers, figure **binary_comps) {
    figure *fig = *binary_comps;
    malloc_components(binary_comps);
    rec_601_gray(row_pointers, *fig);
    divide_range(fig, 0.5);
}
/**
 * Convert raw image bytes to gray levels using the ITU-R Rec. 601 luma
 * weights (0.2989 R + 0.5870 G + 0.1140 B), initializing each pixel's
 * label to -1 and recording its coordinates.
 *
 * NOTE(review): indexing assumes a packed 3-bytes-per-pixel RGB layout,
 * but read_png_file() above produces 4-byte RGBA rows — confirm the
 * caller strips the alpha channel before calling this.
 *
 * @param row_pointers flat buffer of image bytes, 3 bytes per pixel
 * @param binary_comps figure receiving the gray values (pixels allocated)
 */
void rec_601_gray(png_byte *row_pointers, figure binary_comps) {
#pragma omp parallel for
for (int y = 0; y < binary_comps.dim_y; y++) {
for (int x = 0; x < binary_comps.dim_x; x++) {
png_byte *px = &(row_pointers[(y * binary_comps.dim_x * 3 + x * 3)]);
binary_comps.pixels[y][x].value = (0.2989 * px[0]) + (0.5870 * px[1]) + (0.1140 * px[2]);
binary_comps.pixels[y][x].label = -1;
binary_comps.pixels[y][x].y = y;
binary_comps.pixels[y][x].x = x;
}
}
}
/**
 * Allocates an empty FIFO queue used by the flood-fill in scc_count.
 *
 * Fix: dropped the redundant malloc cast; `sizeof *q` keeps the size
 * correct if the struct type changes.
 *
 * @return heap-allocated queue with size 0; caller frees it
 */
fifo_queue *create_queue() {
    fifo_queue *q = malloc(sizeof *q);
    q->size = 0;
    // NOTE(review): only `size` is initialized here, as in the original.
    // If fifo_queue carries head/tail pointers they remain indeterminate —
    // confirm enqueue() initializes them when size == 0.
    return q;
}
/**
 * Thresholds a gray-level figure into a binary one: values above
 * 255 * factor become 1, everything else 0.
 *
 * Fix: the pragma read "#pragma omg parallel for" — an unknown pragma is
 * silently ignored, so the loop never ran in parallel. Corrected to "omp".
 * Iterations are independent, so results are unchanged.
 *
 * @param binary_figure figure whose pixel values are replaced in place
 * @param factor threshold fraction of the 0..255 range (e.g. 0.5)
 */
void divide_range(figure *binary_figure, double factor) {
    double threshold = 255 * factor;
#pragma omp parallel for
    for (int y = 0; y < binary_figure->dim_y; y++) {
        for (int x = 0; x < binary_figure->dim_x; x++) {
            binary_figure->pixels[y][x].value =
                (binary_figure->pixels[y][x].value > threshold) ? 1 : 0;
        }
    }
}
/**
 * Allocates an empty equivalency list (label pairs considered equal).
 *
 * Fix: removed the malloc cast and used `sizeof *eq` per C idiom.
 *
 * @return heap-allocated empty list; free with destroy_equivalency()
 */
equivalent *create_equivalent() {
    equivalent *eq = malloc(sizeof *eq);
    eq->size = 0;
    eq->head = NULL;
    eq->tail = NULL;
    return eq;
}
/** Returns the smaller of two ints. */
int min_two(int a, int b) {
    if (b < a)
        return b;
    return a;
}
/**
 * Allocates a single equivalency node holding the label pair (a, b).
 *
 * Fix: `sizeof(label)` replaced by `sizeof *node` so the allocation
 * tracks the pointer's type rather than a repeated type name.
 *
 * @param a first label of the equivalent pair
 * @param b second label of the equivalent pair
 * @return heap-allocated node with next == NULL; caller owns it
 */
label *give_label(int a, int b) {
    label *node = malloc(sizeof *node);
    node->a = a;
    node->b = b;
    node->next = NULL;
    return node;
}
/**
 * Appends the equivalency (a, b) to the list unless it is already present
 * (order-insensitive: (a,b) and (b,a) count as the same pair).
 *
 * Fix: the original duplicate scan ran `while (t->next != NULL)` and so
 * never tested the LAST node — a pair duplicating the tail was re-inserted,
 * inflating eq->size. The scan now covers every node.
 *
 * @param eq equivalency list created by create_equivalent()
 * @param a  first label
 * @param b  second label
 */
void add_equivalency(equivalent **eq, int a, int b) {
    equivalent *list = *eq;
    // Scan ALL nodes, tail included, for a duplicate pair.
    for (label *t = list->head; t != NULL; t = t->next) {
        if ((a == t->a || a == t->b) && (b == t->b || b == t->a))
            return;
    }
    label *node = give_label(a, b);
    if (list->size == 0) {
        // First node: head only, tail stays NULL (matches original layout).
        list->head = node;
    } else if (list->tail == NULL) {
        // Second node: establish the tail and link it behind the head.
        list->tail = node;
        list->head->next = node;
    } else {
        list->tail->next = node;
        list->tail = node;
    }
    list->size++;
}
/**
 * Frees every node of an equivalency list, then the list header itself.
 *
 * @param eq list created by create_equivalent(); invalid after this call
 */
void destroy_equivalency(equivalent *eq) {
    label *node = eq->head;
    while (node != NULL) {
        label *next = node->next;  // save before freeing the node
        free(node);
        node = next;
    }
    free(eq);
}
/**
 * Debug helper: dumps a binary figure as an ASCII PGM (P2) file ./test.pgm
 * with maxval 1.
 *
 * Fixes: (1) fopen() result was dereferenced unchecked — now bails out on
 * failure; (2) the line break fired only when x == 16 (once per row), so
 * wide images produced arbitrarily long lines — now wraps every 16 values,
 * keeping lines within the PGM 70-character recommendation.
 *
 * @param binary figure whose pixel values are 0 or 1
 */
void _write_test_image(figure binary) {
    FILE *fp = fopen("./test.pgm", "w");
    if (fp == NULL)
        return;  // best-effort debug output; nothing to do on failure
    fprintf(fp, "P2\n");
    fprintf(fp, "# lel\n");
    fprintf(fp, "%d %d\n", binary.dim_x, binary.dim_y);
    fprintf(fp, "1\n");
    const int vals_per_line = 16;
    for (int y = 0; y < binary.dim_y; ++y) {
        for (int x = 0; x < binary.dim_x; ++x) {
            fprintf(fp, "%d ", binary.pixels[y][x].value);
            // Wrap after every vals_per_line samples.
            if (x % vals_per_line == vals_per_line - 1)
                fprintf(fp, "\n");
        }
    }
    fclose(fp);
}
/**
 * Counts the 8-connected components of foreground (value == 1) pixels
 * using a BFS flood fill. Unvisited pixels carry label -1; every pixel of
 * component k ends with label k+1.
 *
 * Fixes:
 *  - Two out-of-bounds reads: the upper-left and lower-left neighbor
 *    checks tested `(c->x - 1) < dim_x` instead of `(c->x - 1) >= 0`,
 *    so pixels in column 0 read pixels[...][-1].
 *  - Removed ~75 lines of commented-out two-pass labeling code.
 *  - Neighbors are now marked with `label + 1` when enqueued, matching
 *    the value assigned on dequeue (the original marked them with `label`
 *    and overwrote on dequeue — final labels are identical).
 *
 * @param src binarized figure; labels must be initialized to -1
 * @return number of connected components
 */
int scc_count(figure **src) {
    figure *fig = *src;
    fifo_queue *q = create_queue();
    int label = 0;
    for (int y = 0; y < fig->dim_y; y++) {
        for (int x = 0; x < fig->dim_x; x++) {
            if (fig->pixels[y][x].value != 1 || fig->pixels[y][x].label != -1)
                continue;
            // New component found: flood-fill everything reachable from it.
            enqueue(&q, &fig->pixels[y][x]);
            while (q->size != 0) {
                pixel *c = dequeue(&q);
                c->label = label + 1;
                // Examine the 8 neighbors that lie inside the image.
                for (int dy = -1; dy <= 1; dy++) {
                    for (int dx = -1; dx <= 1; dx++) {
                        if (dy == 0 && dx == 0)
                            continue;
                        int ny = c->y + dy;
                        int nx = c->x + dx;
                        if (ny < 0 || ny >= fig->dim_y ||
                            nx < 0 || nx >= fig->dim_x)
                            continue;
                        pixel *nb = &fig->pixels[ny][nx];
                        if (nb->value == 1 && nb->label == -1) {
                            nb->label = label + 1;  // mark visited
                            enqueue(&q, nb);
                        }
                    }
                }
            }
            label++;
        }
    }
    free(q);
    return label;
}
/**
 * Frees a figure: first its pixel grid, then the struct itself.
 *
 * @param src figure allocated by initiate_figure(); invalid after this call
 */
void destroy_figure(figure *src) {
    destroy_components(src->pixels, src->dim_y);
    free(src);
}
/**
 * Frees a 2-D pixel grid: each row array, then the row-pointer array.
 *
 * @param src   row-pointer array allocated by malloc_components()
 * @param dim_y number of rows in src
 */
void destroy_components(pixel **src, int dim_y) {
    for (int row = dim_y - 1; row >= 0; row--)
        free(src[row]);
    free(src);
}
/**
 * Allocates `size` RGB block buffers, each holding block_y * block_x pixels
 * at 3 bytes per pixel.
 *
 * Fix: removed the malloc casts; the outer allocation uses `sizeof *blocks`
 * so it stays correct if the element type changes.
 *
 * @param size    number of blocks
 * @param block_y block height in pixels
 * @param block_x block width in pixels
 * @return array of `size` buffers; free with destroy_blocks()
 */
png_byte **initiate_blocks(int size, int block_y, int block_x) {
    png_byte **blocks = malloc(sizeof *blocks * size);
    for (int i = 0; i < size; i++)
        blocks[i] = malloc(sizeof(png_byte) * (block_y * block_x * 3));
    return blocks;
}
/**
 * Allocates one RGB block buffer: 3 bytes per pixel, alpha is never stored.
 *
 * Fix: removed the redundant malloc cast (required only in C++).
 *
 * @param stream_max_size maximum number of pixels in the block
 * @return heap buffer of stream_max_size * 3 bytes; caller frees it
 */
png_byte *create_block(int stream_max_size) {
    return malloc(sizeof(png_byte) * (stream_max_size * 3));
}
/**
 * Copies a rectangular region of RGBA scanlines into a packed RGB block,
 * dropping the alpha channel.
 *
 * Fix: the pragma read "#pragma omg parallel for" — unknown pragmas are
 * ignored, so the loop silently ran serially. Corrected to "omp";
 * iterations write disjoint block ranges, so results are unchanged.
 *
 * @param src    RGBA source rows (4 bytes per pixel)
 * @param block  destination, (y_end-y_init)*(x_end-x_init)*3 bytes
 * @param y_init first source row (inclusive)
 * @param y_end  last source row (exclusive)
 * @param x_init horizontal start offset — see NOTE below
 * @param x_end  horizontal end offset (exclusive)
 */
void divide_block(png_byte **src, png_byte *block, int y_init, int y_end, int x_init, int x_end) {
    int dim_y = y_end - y_init;
    int dim_x = x_end - x_init;
#pragma omp parallel for
    for (int y = 0; y < dim_y; y++) {
        for (int x = 0; x < dim_x; x++) {
            // NOTE(review): x_init is added unscaled while x is multiplied
            // by 4 — this is only correct if callers pass x_init already in
            // bytes. If x_init is a pixel column (as block_list_linking
            // suggests), this should be (x_init + x) * 4. Confirm with callers.
            png_byte *ptr = &src[y_init + y][x_init + x * 4];
            png_byte *dst = &block[(y * dim_x + x) * 3];
            dst[0] = ptr[0];
            dst[1] = ptr[1];
            dst[2] = ptr[2];
        }
    }
}
/**
 * Allocates a figure header and records its dimensions.
 *
 * Fix: removed the malloc cast and used `sizeof *fig` per C idiom.
 * The pixels member is intentionally left unset here; callers allocate it
 * via malloc_components().
 *
 * @param dim_y image height in pixels
 * @param dim_x image width in pixels
 * @return heap-allocated figure; free with destroy_figure()
 */
figure *initiate_figure(int dim_y, int dim_x) {
    figure *fig = malloc(sizeof *fig);
    fig->dim_y = dim_y;
    fig->dim_x = dim_x;
    return fig;
}
/**
 * Frees an array of block buffers and the array itself.
 *
 * @param blocks array of `size` heap buffers (e.g. from initiate_blocks)
 * @param size   number of entries in blocks
 */
void destroy_blocks(void **blocks, int size) {
    for (int i = 0; i < size; ++i)
        free(blocks[i]);
    free(blocks);
}
/**
 * Tiles a dim_y x dim_x image into block_y x block_x blocks and inserts
 * each block's boundaries into the block list. Edge blocks are clamped
 * to the image dimensions.
 *
 * @param l       block list receiving one entry per tile (via inlink)
 * @param dim_y   image height
 * @param dim_x   image width
 * @param block_y nominal block height
 * @param block_x nominal block width
 */
void block_list_linking(block_list **l, int dim_y, int dim_x, int block_y, int block_x) {
    for (int row = 0; row < dim_y; row += block_y) {
        // Clamp the block's bottom edge to the image height.
        int row_end = (row + block_y > dim_y) ? dim_y : row + block_y;
        for (int col = 0; col < dim_x; col += block_x) {
            // Clamp the block's right edge to the image width.
            int col_end = (col + block_x > dim_x) ? dim_x : col + block_x;
            inlink(*l, row, row_end, col, col_end);
        }
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.